Diffstat (limited to 'src/cmd/compile/internal/ssa/gen/ARM64.rules'):
 src/cmd/compile/internal/ssa/gen/ARM64.rules | 2789 +++++++++++++++++++++++++
 1 file changed, 2789 insertions(+), 0 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
new file mode 100644
index 0000000..80b4005
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -0,0 +1,2789 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(32F|64F) ...) => (FADD(S|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(32F|64F) ...) => (FSUB(S|D) ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul(32|16|8) ...) => (MULW ...)
+(Mul(32F|64F) ...) => (FMUL(S|D) ...)
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (UMULH ...)
+(Hmul32 x y) => (SRAconst (MULL <typ.Int64> x y) [32])
+(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
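+
+// A minimal Go sketch (illustrative only, not part of this file) of what
+// the Hmul32 rule computes: the high 32 bits of the signed 32x32->64-bit
+// product, i.e. a full-width MULL followed by SRAconst [32].
+//
+//	func hmul32(x, y int32) int32 {
+//		return int32((int64(x) * int64(y)) >> 32) // MULL; SRAconst [32]
+//	}
+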
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+
+(Div64 [false] x y) => (DIV x y)
+(Div64u ...) => (UDIV ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (UDIVW ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Mod64 x y) => (MOD x y)
+(Mod64u ...) => (UMOD ...)
+(Mod32 x y) => (MODW x y)
+(Mod32u ...) => (UMODW ...)
+(Mod16 x y) => (MODW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (MODW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+// (x + y) / 2 with x >= y  =>  (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
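+
+// A sketch (assumed helper name, illustrative only) of the identity the
+// Avg64u rule relies on: when x >= y the difference cannot wrap, so the
+// 65-bit intermediate sum of (x + y) / 2 is never materialized.
+//
+//	func avg64u(x, y uint64) uint64 { // requires x >= y
+//		return (x-y)>>1 + y // SUB; SRLconst [1]; ADD
+//	}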
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(32F|64F) ...) => (FNEG(S|D) ...)
+(Com(64|32|16|8) ...) => (MVN ...)
+
+// math package intrinsics
+(Abs ...) => (FABSD ...)
+(Sqrt ...) => (FSQRTD ...)
+(Ceil ...) => (FRINTPD ...)
+(Floor ...) => (FRINTMD ...)
+(Round ...) => (FRINTAD ...)
+(RoundToEven ...) => (FRINTND ...)
+(Trunc ...) => (FRINTZD ...)
+(FMA x y z) => (FMADDD z x y)
+
+// lowering rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
+(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
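+
+// Sketch: ARM64 has only a rotate-right instruction, and a left rotate by y
+// equals a right rotate by -y (mod width), hence the NEG. Illustrative Go:
+//
+//	func rotl32(x uint32, y int) uint32 {
+//		return bits.RotateLeft32(x, y) // lowers to RORW x, (NEG y)
+//	}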
+
+(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)
+
+(Ctz64 <t> x) => (CLZ (RBIT <t> x))
+(Ctz32 <t> x) => (CLZW (RBITW <t> x))
+(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
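+
+// Sketch of the Ctz lowering: ARM64 lacks a count-trailing-zeros
+// instruction, so the operand is bit-reversed (RBIT) and the leading zeros
+// are counted (CLZ). For the narrow widths, OR-ing in a bit just above the
+// operand caps the result, e.g. (illustrative Go, assuming math/bits):
+//
+//	func ctz16(x uint16) int {
+//		return bits.TrailingZeros32(uint32(x) | 0x10000) // RBITW; CLZW
+//	}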
+
+(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
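+
+// Sketch of the PopCount lowering: the value is moved to an FP/SIMD
+// register, VCNT produces a per-byte popcount, and VUADDLV sums the byte
+// counts. In illustrative Go terms (not the actual lowering):
+//
+//	cnt := 0
+//	for i := 0; i < 8; i++ {
+//		cnt += bits.OnesCount8(uint8(x >> (8 * i))) // VCNT, per byte
+//	}
+//	// cnt == bits.OnesCount64(x)                   // VUADDLV total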
+
+// Load args directly into the register class where they will be used.
+(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+
+// Similarly for stores: if we see a store immediately after an FPR <=> GPR move, redirect the store to use the other register set.
+(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
+(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
+(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
+(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
+
+// float <=> int register moves, with no conversion.
+// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val)
+(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
+(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val)
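+
+// Sketch of the pattern these rules remove: math.Float64bits compiles to a
+// store of the float followed by an integer reload of the same slot, which
+// collapses to a single FMOVDfpgp register move.
+//
+//	b := math.Float64bits(f) // FMOVDstore + MOVDload => FMOVDfpgp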
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+
+(Bswap64 ...) => (REV ...)
+(Bswap32 ...) => (REVW ...)
+
+(BitRev64 ...) => (RBIT ...)
+(BitRev32 ...) => (RBITW ...)
+(BitRev16 x) => (SRLconst [48] (RBIT <typ.UInt64> x))
+(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))
+
+// UMOD is lowered via UREM, which in turn is implemented as UDIV followed by
+// MSUB. If an identical UDIV already exists just before or after the UREM
+// (as in quo, rem := z/y, z%y), the second UDIV is redundant. These rules
+// expose the UDIV so that the CSE pass can remove the duplicate.
+(UMOD <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
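+
+// Sketch of the case the comment above describes (illustrative Go):
+//
+//	quo, rem := z/y, z%y // one UDIV for quo; rem's UDIV is CSE'd away,
+//	                     // leaving a single MSUB for the remainder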
+
+// 64-bit addition with carry.
+(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
+(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+
+// 64-bit subtraction with borrowing.
+(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
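+
+// Sketch of the producers of these ops (illustrative, assuming math/bits):
+//
+//	sum, carryOut := bits.Add64(x, y, carryIn)    // Add64carry
+//	diff, borrowOut := bits.Sub64(x, y, borrowIn) // Sub64borrow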
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XOR (MOVDconst [1]) x)
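+
+// Sketch: with booleans represented as 0/1, !x is 1 XOR x and x == y is
+// 1 XOR (x XOR y), e.g. (illustrative, 0/1-valued inputs assumed):
+//
+//	func eqb(x, y uint8) uint8 { return 1 ^ (x ^ y) }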
+
+// shifts
+// The hardware instruction uses only the low 6 bits of the shift amount;
+// we compare against 64 to preserve Go semantics for larger shifts.
+// The rotate rules for non-constant shifts are derived from the shift rules
+// below; if the rules below change, update the derived rotate rules as well.
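+//
+// The CSEL structure, as an illustrative Go sketch (the guard is implicit
+// in Go, which already defines oversized shifts to produce 0; it is spelled
+// out here only to mirror the lowering):
+//
+//	func shl(x, s uint64) uint64 {
+//		if s >= 64 {  // CMPconst [64] s, taken when !LessThanU
+//			return 0  // the Const64 [0] arm
+//		}
+//		return x << s // the SLL arm
+//	}
+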
+(Lsh64x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh64x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh32x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh16x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh8x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh64Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh32Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh16Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh8Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64x64 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh64x32 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh64x16 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh64x8 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh32x64 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh32x32 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh32x16 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh32x8 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh16x64 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh16x32 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh16x16 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh8x64 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh8x32 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh8x16 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh8x8 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32F|64F) [val]) => (FMOV(S|D)const [float64(val)])
+(ConstNil) => (MOVDconst [0])
+(ConstBool [b]) => (MOVDconst [b2i(b)])
+
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])
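+
+// Sketch of the Slicemask semantics (x is a length, so x >= 0 is assumed):
+// the result is all ones when x != 0 and zero otherwise; NEG makes the value
+// negative exactly when x > 0, and the arithmetic shift broadcasts the sign
+// bit.
+//
+//	func slicemask(x int64) int64 { return (-x) >> 63 } // NEG; SRAconst [63]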
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (SCVTFWS ...)
+(Cvt32to64F ...) => (SCVTFWD ...)
+(Cvt64to32F ...) => (SCVTFS ...)
+(Cvt64to64F ...) => (SCVTFD ...)
+(Cvt32Uto32F ...) => (UCVTFWS ...)
+(Cvt32Uto64F ...) => (UCVTFWD ...)
+(Cvt64Uto32F ...) => (UCVTFS ...)
+(Cvt64Uto64F ...) => (UCVTFD ...)
+(Cvt32Fto32 ...) => (FCVTZSSW ...)
+(Cvt64Fto32 ...) => (FCVTZSDW ...)
+(Cvt32Fto64 ...) => (FCVTZSS ...)
+(Cvt64Fto64 ...) => (FCVTZSD ...)
+(Cvt32Fto32U ...) => (FCVTZUSW ...)
+(Cvt64Fto32U ...) => (FCVTZUDW ...)
+(Cvt32Fto64U ...) => (FCVTZUS ...)
+(Cvt64Fto64U ...) => (FCVTZUD ...)
+(Cvt32Fto64F ...) => (FCVTSD ...)
+(Cvt64Fto32F ...) => (FCVTDS ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (LoweredRound32F ...)
+(Round64F ...) => (LoweredRound64F ...)
+
+// comparisons
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPS x y))
+(Eq64F x y) => (Equal (FCMPD x y))
+
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq32F x y) => (NotEqual (FCMPS x y))
+(Neq64F x y) => (NotEqual (FCMPD x y))
+
+(Less8 x y) => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+
+// Set condition flags for the floating-point comparisons "x < y" and
+// "x <= y". If either operand is NaN, all three of (x < y), (x == y), and
+// (x > y) are false, and the ARM manual specifies that FCMP sets
+// PSTATE.<N,Z,C,V> to (0, 0, 1, 1) in that case.
+(Less32F x y) => (LessThanF (FCMPS x y))
+(Less64F x y) => (LessThanF (FCMPD x y))
+
+// For an unsigned integer x, the following identities are useful when
+// absorbing comparisons into branches:
+// 0 < x => x != 0
+// x <= 0 => x == 0
+// x < 1 => x == 0
+// 1 <= x => x != 0
+(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
+(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero)
+(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0]))
+(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)
+
+(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMPW x y))
+(Less64U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+
+// Refer to the comments for op Less64F above.
+(Leq32F x y) => (LessEqualF (FCMPS x y))
+(Leq64F x y) => (LessEqualF (FCMPD x y))
+
+(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMPW x y))
+(Leq64U x y) => (LessEqualU (CMP x y))
+
+// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
+(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
+(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
+(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
+(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))
+
+// CSEL needs a flag-generating argument. Synthesize a CMPW if necessary.
+(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
+(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [off] ptr)
+
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [5] ptr mem) =>
+ (MOVBstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [6] ptr mem) =>
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [7] ptr mem) =>
+ (MOVBstore [6] ptr (MOVDconst [0])
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem)))
+(Zero [9] ptr mem) =>
+ (MOVBstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [10] ptr mem) =>
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [11] ptr mem) =>
+ (MOVBstore [10] ptr (MOVDconst [0])
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [12] ptr mem) =>
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [13] ptr mem) =>
+ (MOVBstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [14] ptr mem) =>
+ (MOVHstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [15] ptr mem) =>
+ (MOVBstore [14] ptr (MOVDconst [0])
+ (MOVHstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))))
+(Zero [16] ptr mem) =>
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+
+(Zero [32] ptr mem) =>
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+
+(Zero [48] ptr mem) =>
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+
+(Zero [64] ptr mem) =>
+ (STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+
+// strip off fractional word zeroing
+(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
+ (Zero [8]
+ (OffPtr <ptr.Type> ptr [s-8])
+ (Zero [s-s%16] ptr mem))
+(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
+ (Zero [16]
+ (OffPtr <ptr.Type> ptr [s-16])
+ (Zero [s-s%16] ptr mem))
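+
+// Worked example (sketch): Zero [27] has s%16 == 11 > 8, so it becomes a
+// Zero [16] at offset 11 over a Zero [16] at offset 0; the two regions
+// overlap in bytes 11-15, which is harmless when storing zeros.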
+
+// medium zeroing uses a Duff's device
+// 4, 16, and 64 are magic constants; see runtime/mkduff.go
+(Zero [s] ptr mem)
+ && s%16 == 0 && s > 64 && s <= 16*64
+ && !config.noDuffDevice =>
+ (DUFFZERO [4 * (64 - s/16)] ptr mem)
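+
+// Worked example (sketch): s = 128 zeroes s/16 = 8 16-byte blocks, so the
+// entry offset is 4 * (64 - 8) = 224 bytes into duffzero, skipping the
+// first 56 blocks of the 64-block routine.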
+
+// large zeroing uses a loop
+(Zero [s] ptr mem)
+ && s%16 == 0 && (s > 16*64 || config.noDuffDevice) =>
+ (LoweredZero
+ ptr
+ (ADDconst <ptr.Type> [s-16] ptr)
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBUload [6] src mem)
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem)))
+(Move [12] dst src mem) =>
+ (MOVWstore [8] dst (MOVWUload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [16] dst src mem) =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] dst src mem) =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+
+// strip off fractional word move
+(Move [s] dst src mem) && s%8 != 0 && s > 8 =>
+ (Move [s%8]
+ (OffPtr <dst.Type> dst [s-s%8])
+ (OffPtr <src.Type> src [s-s%8])
+ (Move [s-s%8] dst src mem))
+
+// medium move uses a Duff's device
+(Move [s] dst src mem)
+ && s > 32 && s <= 16*64 && s%16 == 8
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
+ (DUFFCOPY <types.TypeMem> [8*(64-(s-8)/16)] dst src mem))
+(Move [s] dst src mem)
+ && s > 32 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (64 - s/16)] dst src mem)
+// 8 is the size in bytes of one copy block, i.e. the encoding of:
+//
+// LDP.P 16(R16), (R26, R27)
+// STP.P (R26, R27), 16(R17)
+//
+// 64 is the number of such blocks. See runtime/duff_arm64.s:duffcopy.
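+//
+// Worked example (sketch): for Move [40], s%16 == 8, so the trailing 8
+// bytes are copied by the MOVDload/MOVDstore pair at offset 32, and
+// DUFFCOPY handles the remaining 32 bytes, entering at 8*(64-32/16) = 496.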
+
+// large move uses a loop
+(Move [s] dst src mem)
+ && s > 24 && s%8 == 0 && logLargeCopy(v, s) =>
+ (LoweredMove
+ dst
+ src
+ (ADDconst <src.Type> src [s-8])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(If (LessThanF cc) yes no) => (FLT cc yes no)
+(If (LessEqualF cc) yes no) => (FLE cc yes no)
+(If (GreaterThanF cc) yes no) => (FGT cc yes no)
+(If (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (NZ cond yes no)
+
+// atomic intrinsics
+// Note: these ops do not accept an offset.
+(AtomicLoad8 ...) => (LDARB ...)
+(AtomicLoad32 ...) => (LDARW ...)
+(AtomicLoad64 ...) => (LDAR ...)
+(AtomicLoadPtr ...) => (LDAR ...)
+
+(AtomicStore8 ...) => (STLRB ...)
+(AtomicStore32 ...) => (STLRW ...)
+(AtomicStore64 ...) => (STLR ...)
+(AtomicStorePtrNoWB ...) => (STLR ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
+
+(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
+(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...)
+(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)
+
+// Currently the updated value is not used, but we need a register to temporarily hold it.
+(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
+(AtomicAnd32 ptr val mem) => (Select1 (LoweredAtomicAnd32 ptr val mem))
+(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
+(AtomicOr32 ptr val mem) => (Select1 (LoweredAtomicOr32 ptr val mem))
+
+(AtomicAnd8Variant ptr val mem) => (Select1 (LoweredAtomicAnd8Variant ptr val mem))
+(AtomicAnd32Variant ptr val mem) => (Select1 (LoweredAtomicAnd32Variant ptr val mem))
+(AtomicOr8Variant ptr val mem) => (Select1 (LoweredAtomicOr8Variant ptr val mem))
+(AtomicOr32Variant ptr val mem) => (Select1 (LoweredAtomicOr32Variant ptr val mem))
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into blocks
+(NZ (Equal cc) yes no) => (EQ cc yes no)
+(NZ (NotEqual cc) yes no) => (NE cc yes no)
+(NZ (LessThan cc) yes no) => (LT cc yes no)
+(NZ (LessThanU cc) yes no) => (ULT cc yes no)
+(NZ (LessEqual cc) yes no) => (LE cc yes no)
+(NZ (LessEqualU cc) yes no) => (ULE cc yes no)
+(NZ (GreaterThan cc) yes no) => (GT cc yes no)
+(NZ (GreaterThanU cc) yes no) => (UGT cc yes no)
+(NZ (GreaterEqual cc) yes no) => (GE cc yes no)
+(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(NZ (LessThanF cc) yes no) => (FLT cc yes no)
+(NZ (LessEqualF cc) yes no) => (FLE cc yes no)
+(NZ (GreaterThanF cc) yes no) => (FGT cc yes no)
+(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (EQ (TSTWconst [int32(c)] y) yes no)
+(NE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (NE (TSTWconst [int32(c)] y) yes no)
+(LT (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LT (TSTWconst [int32(c)] y) yes no)
+(LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LE (TSTWconst [int32(c)] y) yes no)
+(GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GT (TSTWconst [int32(c)] y) yes no)
+(GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GE (TSTWconst [int32(c)] y) yes no)
+
+(EQ (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (EQ (TST x y) yes no)
+(NE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TST x y) yes no)
+(LT (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LT (TST x y) yes no)
+(LE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LE (TST x y) yes no)
+(GT (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GT (TST x y) yes no)
+(GE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GE (TST x y) yes no)
+
+(EQ (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (EQ (TSTW x y) yes no)
+(NE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TSTW x y) yes no)
+(LT (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LT (TSTW x y) yes no)
+(LE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LE (TSTW x y) yes no)
+(GT (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GT (TSTW x y) yes no)
+(GE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GE (TSTW x y) yes no)
+
+(EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (EQ (TSTconst [c] y) yes no)
+(NE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (NE (TSTconst [c] y) yes no)
+(LT (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LT (TSTconst [c] y) yes no)
+(LE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LE (TSTconst [c] y) yes no)
+(GT (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GT (TSTconst [c] y) yes no)
+(GE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GE (TSTconst [c] y) yes no)
+
+(EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (EQ (CMNconst [c] y) yes no)
+(NE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (NE (CMNconst [c] y) yes no)
+(LT (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LTnoov (CMNconst [c] y) yes no)
+(LE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LEnoov (CMNconst [c] y) yes no)
+(GT (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GTnoov (CMNconst [c] y) yes no)
+(GE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GEnoov (CMNconst [c] y) yes no)
+
+(EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (EQ (CMNWconst [int32(c)] y) yes no)
+(NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (NE (CMNWconst [int32(c)] y) yes no)
+(LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LTnoov (CMNWconst [int32(c)] y) yes no)
+(LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LEnoov (CMNWconst [int32(c)] y) yes no)
+(GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GTnoov (CMNWconst [int32(c)] y) yes no)
+(GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GEnoov (CMNWconst [int32(c)] y) yes no)
+
+(EQ (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
+(NE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
+(LT (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LTnoov (CMN x y) yes no)
+(LE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LEnoov (CMN x y) yes no)
+(GT (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMN x y) yes no)
+(GE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMN x y) yes no)
+
+(EQ (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
+(NE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
+(LT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LTnoov (CMNW x y) yes no)
+(LE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LEnoov (CMNW x y) yes no)
+(GT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMNW x y) yes no)
+(GE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMNW x y) yes no)
+
+(EQ (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
+(NE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
+(LT (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (LT (CMN x y) yes no)
+(LE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (LE (CMN x y) yes no)
+(GT (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (GT (CMN x y) yes no)
+(GE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (GE (CMN x y) yes no)
+
+(EQ (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
+(NE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
+(LT (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (LT (CMNW x y) yes no)
+(LE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (LE (CMNW x y) yes no)
+(GT (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (GT (CMNW x y) yes no)
+(GE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (GE (CMNW x y) yes no)
+
+(EQ (CMPconst [0] x) yes no) => (Z x yes no)
+(NE (CMPconst [0] x) yes no) => (NZ x yes no)
+(EQ (CMPWconst [0] x) yes no) => (ZW x yes no)
+(NE (CMPWconst [0] x) yes no) => (NZW x yes no)
+
+(EQ (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (EQ (CMN a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (NE (CMN a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+
+(EQ (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (NE (CMP a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+
+(EQ (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (EQ (CMNW a (MULW <x.Type> x y)) yes no)
+(NE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (NE (CMNW a (MULW <x.Type> x y)) yes no)
+(LE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (LEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(LT (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (LTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(GE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (GEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(GT (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (GTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+
+(EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (EQ (CMPW a (MULW <x.Type> x y)) yes no)
+(NE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (NE (CMPW a (MULW <x.Type> x y)) yes no)
+(LE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (LEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(LT (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (LTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(GE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (GEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(GT (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (GTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+
+// Absorb bit-tests into blocks
+(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
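+
+// Sketch of the source pattern (illustrative Go): a single-bit mask test
+// branches directly on that bit.
+//
+//	if x&8 != 0 { ... } // ANDconst [8]; oneBit(8) => TBNZ [3] x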
+
+// Test the sign bit for signed comparisons against zero
+(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no)
+(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no)
+(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
+(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no)
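+
+// Sketch (illustrative Go): comparing against zero only needs the sign bit.
+//
+//	if x < 0 { ... } // (LT (CMPconst [0] x)) => TBNZ [63] x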
+
+// fold offset into address
+(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
+ (MOVDaddr [int32(off1)+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+int32(off2)] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed load
+(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
+(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
+(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
+(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
+(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)
+(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+
+// shifted register indexed load
+(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
+(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
+(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
+(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
+(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
+(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem)
+(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
+(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr mem)
+(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
+(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed store
+(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
+(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
+(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
+(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)
+(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
+(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
+(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
+(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
+(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
+(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
+(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
+(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
+(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
+(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)
+
+// shifted register indexed store
+(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
+(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
+(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) => (MOVQstorezero [off] {sym} ptr mem)
+
+// register indexed store zero
+(MOVDstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem)
+(MOVBstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem)
+(MOVDstoreidx ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx ptr idx mem)
+(MOVWstoreidx ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx ptr idx mem)
+(MOVHstoreidx ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx ptr idx mem)
+(MOVBstoreidx ptr idx (MOVDconst [0]) mem) => (MOVBstorezeroidx ptr idx mem)
+(MOVDstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDstorezero [int32(c)] ptr mem)
+(MOVDstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVDstorezero [int32(c)] idx mem)
+(MOVWstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem)
+(MOVWstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVWstorezero [int32(c)] idx mem)
+(MOVHstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem)
+(MOVHstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem)
+(MOVBstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem)
+(MOVBstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem)
+
+// shifted register indexed store zero
+(MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx ptr (SLLconst [3] idx) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx ptr (SLLconst [2] idx) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx ptr (SLLconst [1] idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx ptr (ADD idx idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx (SLLconst [3] idx) ptr mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx (SLLconst [2] idx) ptr mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx (SLLconst [1] idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx (ADD idx idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDstorezero [int32(c<<3)] ptr mem)
+(MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWstorezero [int32(c<<2)] ptr mem)
+(MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHstorezero [int32(c<<1)] ptr mem)
+
+// replace a load from the same location as a preceding store with a zero/sign extension (or a copy, for full-width loads)
+// these seem to interact badly with other rules, resulting in slower code, so they remain disabled
+//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
+//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
+//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
+//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
+//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
+//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
+//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+
+(MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+
+(MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
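+
+// For example, when a zero store is immediately followed by a load of the
+// same location, roughly
+//     *p = 0
+//     x := *p
+// the rules above let the load be replaced by (MOVDconst [0]).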
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx4 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx4 _ _ _)) => (MOVDreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVDnop doesn't emit an instruction; it exists only to ensure the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// fold constants into arithmetic ops
+(ADD x (MOVDconst [c])) => (ADDconst [c] x)
+(SUB x (MOVDconst [c])) => (SUBconst [c] x)
+(AND x (MOVDconst [c])) => (ANDconst [c] x)
+(OR x (MOVDconst [c])) => (ORconst [c] x)
+(XOR x (MOVDconst [c])) => (XORconst [c] x)
+(TST x (MOVDconst [c])) => (TSTconst [c] x)
+(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
+(CMN x (MOVDconst [c])) => (CMNconst [c] x)
+(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
+(BIC x (MOVDconst [c])) => (ANDconst [^c] x)
+(EON x (MOVDconst [c])) => (XORconst [^c] x)
+(ORN x (MOVDconst [c])) => (ORconst [^c] x)
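+
+// For example, Go source like
+//     func addc(x int64) int64 { return x + 5 }     // illustrative
+// should match (ADD x (MOVDconst [5])) above and become (ADDconst [5] x),
+// a single add-immediate.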
+
+(SLL x (MOVDconst [c])) => (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64)
+(SRL x (MOVDconst [c])) => (SRLconst x [c&63])
+(SRA x (MOVDconst [c])) => (SRAconst x [c&63])
+
+(CMP x (MOVDconst [c])) => (CMPconst [c] x)
+(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
+(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x))
+
+// mul-neg => mneg
+(NEG (MUL x y)) => (MNEG x y)
+(NEG (MULW x y)) => (MNEGW x y)
+(MUL (NEG x) y) => (MNEG x y)
+(MULW (NEG x) y) => (MNEGW x y)
+
+// madd/msub
+(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+
+(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
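+
+// For example, a multiply-add expression, roughly
+//     func fma(a, b, c int64) int64 { return a + b*c }     // illustrative
+// is expected to fuse into a single MADD when the multiply has no other uses.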
+
+// optimize ADCSflags, SBCSflags and friends
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)
+
+// mul by constant
+(MUL x (MOVDconst [-1])) => (NEG x)
+(MUL _ (MOVDconst [0])) => (MOVDconst [0])
+(MUL x (MOVDconst [1])) => x
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
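+
+// For example, a multiplication by 9, roughly
+//     func mul9(x int64) int64 { return x * 9 }     // illustrative
+// should hit the power-of-two-minus-one case above and become
+// (ADDshiftLL x x [3]), i.e. x + x<<3, with no MUL issued.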
+
+(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
+(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MULW x (MOVDconst [c])) && int32(c)==1 => x
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+// mneg by constant
+(MNEG x (MOVDconst [-1])) => x
+(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
+(MNEG x (MOVDconst [1])) => (NEG x)
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
+(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+(MADD a x (MOVDconst [-1])) => (SUB a x)
+(MADD a _ (MOVDconst [0])) => a
+(MADD a x (MOVDconst [1])) => (ADD a x)
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADD a (MOVDconst [-1]) x) => (SUB a x)
+(MADD a (MOVDconst [0]) _) => a
+(MADD a (MOVDconst [1]) x) => (ADD a x)
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
+(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a x (MOVDconst [-1])) => (ADD a x)
+(MSUB a _ (MOVDconst [0])) => a
+(MSUB a x (MOVDconst [1])) => (SUB a x)
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a (MOVDconst [-1]) x) => (ADD a x)
+(MSUB a (MOVDconst [0]) _) => a
+(MSUB a (MOVDconst [1]) x) => (SUB a x)
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+// div by constant
+(UDIV x (MOVDconst [1])) => x
+(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
+(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
+(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
+(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
+(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
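+
+// For example, unsigned division and remainder by a power of two, roughly
+//     func divmod8(x uint64) (uint64, uint64) { return x / 8, x % 8 }     // illustrative
+// is expected to end up as (SRLconst [3] x) and (ANDconst [7] x), whether
+// via generic strength reduction or the rules above, avoiding UDIV/UMOD.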
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVDconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVDconst [0])
+(BIC x x) => (MOVDconst [0])
+(EON x x) => (MOVDconst [-1])
+(ORN x x) => (MOVDconst [-1])
+(AND x (MVN y)) => (BIC x y)
+(XOR x (MVN y)) => (EON x y)
+(OR x (MVN y)) => (ORN x y)
+(MVN (XOR x y)) => (EON x y)
+(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
+(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
+(SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
+(SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
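+
+// For example, Go's and-not operator, roughly
+//     func andnot(x, y uint64) uint64 { return x &^ y }     // illustrative
+// is lowered by the frontend to an AND of a complement, i.e. (AND x (MVN y)),
+// which the rules above then fold into a single BIC.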
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (MVN x)
+
+// generic constant folding
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
+(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
+(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)*int32(d))])
+(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-int64(int32(c)*int32(d))])
+(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
+(MADDW (MOVDconst [c]) x y) => (ADDconst [c] (MULW <x.Type> x y))
+(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
+(MSUBW (MOVDconst [c]) x y) => (ADDconst [c] (MNEGW <x.Type> x y))
+(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [int64(int32(c)*int32(d))] a)
+(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [int64(int32(c)*int32(d))] a)
+(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
+(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)/int32(d))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
+(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)%int32(d))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
+(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
+(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
+(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(MVN (MOVDconst [c])) => (MOVDconst [^c])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+(MOVDreg (MOVDconst [c])) => (MOVDconst [c])
+
+// constant comparisons
+(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
+(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
+(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
+(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
+(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
+(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+(Z (MOVDconst [0]) yes no) => (First yes no)
+(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
+(NZ (MOVDconst [0]) yes no) => (First no yes)
+(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
+(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
+(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
+(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb InvertFlags into CSEL(0)
+(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp)
+(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+(LessThanF (InvertFlags x)) => (GreaterThanF x)
+(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
+(GreaterThanF (InvertFlags x)) => (LessThanF x)
+(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
+
+// Boolean-generating instructions always
+// zero the upper bits of the register; no need to zero-extend
+(MOVBUreg x) && x.Type.IsBoolean() => (MOVDreg x)
+
+// absorb flag constants into conditional instructions
+(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
+(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
+
+// absorb flags back into boolean CSEL
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL [boolval.Op] x y flagArg(boolval))
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL0 [boolval.Op] x flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+
+// absorb shifts into ops
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
+(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
+(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
+(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
+(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
+(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
+(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
+(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
+(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
+(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
+(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
+(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
+(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
+(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
+(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
+(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
+(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
+(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
+(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
+(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
+(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
+(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
+(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
+(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
+(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
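+
+// For example, an add of a shifted operand, roughly
+//     func addshift(a, b int64) int64 { return a + b<<3 }     // illustrative
+// should fold the shift into the add via (ADDshiftLL a b [3]),
+// emitting a single ADD a, b, LSL #3.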
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+
+// constant folding in *shift ops
+(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
+(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
+(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
+(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
+(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
+(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
+(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+
+// simplifications with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+
+// Generate rotates with const shift
+(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+
+(RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x)
+(RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x)
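+
+// For example, a rotate by a constant written out as shifts, roughly
+//     func rotr5(x uint64) uint64 { return x>>5 | x<<59 }     // illustrative
+// first becomes (ORshiftLL [59] (SRLconst x [5]) x) and is then rewritten
+// to a single (RORconst [5] x) by the rules above.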
+
+// Generate rotates with non-const shift.
+// These rules match the Go source code like
+// y &= 63
+// x << y | x >> (64-y)
+// "|" can also be "^" or "+".
+// As arm64 does not have a ROL instruction, ROL(x, y) is replaced by ROR(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x y)
+
+// These rules match the Go source code like
+// y &= 31
+// x << y | x >> (32-y)
+// "|" can also be "^" or "+".
+// As arm64 does not have a ROLW instruction, ROLW(x, y) is replaced by RORW(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x y)
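+
+// For example, a variable rotate written out in Go, roughly
+//     func rotl(x uint64, k uint) uint64 {
+//         k &= 63
+//         return x<<k | x>>(64-k)
+//     }
+// is expected to match the first 64-bit pattern above and become
+// (ROR x (NEG k)), a single ROR with a negated shift amount.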
+
+// ((x>>8) | (x<<8)) => (REV16W x), where x has type uint16; "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
+
+// Extract from reg pair
+(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+
+// Rewrite special pairs of shifts to AND.
+// On ARM64 the bitmask can fit in an instruction's immediate field.
+(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
+(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
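+
+// For example, masking via a shift pair, roughly
+//     func low16(x uint64) uint64 { return x << 48 >> 48 }     // illustrative
+// should become (ANDconst [0xffff] x) by the first rule above, since the
+// 0xffff bitmask encodes as a logical immediate.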
+
+// Special case of setting bits to 1: if the OR sets every bit the AND can clear,
+// the AND is redundant. An example is math.Copysign(c,-1)
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)
+
+// bitfield ops
+
+// sbfiz
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+
+// sbfx
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
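+
+// For example, a sign-extending bitfield extract written as shifts, roughly
+//     func sext(x int64) int64 { return x << 8 >> 16 }     // illustrative
+// has lc=8 <= rc=16 and should become (SBFX [armBFAuxInt(8, 48)] x):
+// 48 bits extracted from bit 8, sign-extended.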
+
+// sbfiz/sbfx combinations: merge shifts into bitfield ops
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// ubfiz
+// (x & ac) << sc
+(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
+(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
+(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
+// (x << sc) & ac
+(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+
+// ubfx
+// (x >> sc) & ac
+(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
+(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
+(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
+// (x & ac) >> sc
+(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
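+
+// For example, an unsigned bitfield extract, roughly
+//     func extract(x uint64) uint64 { return (x >> 4) & 0xff }     // illustrative
+// should become (UBFX [armBFAuxInt(4, 8)] x), a single UBFX pulling
+// 8 bits starting at bit 4.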
+
+// ubfiz/ubfx combinations: merge shifts into bitfield ops
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) >> c2) >> c3
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) << c2) >> c3
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// bfi
+(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ && ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ => (BFI [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ && lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ => (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+// bfxil
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ => (BFXIL [bfc] y x)
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+ => (BFXIL [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
+ => (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
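+
+// For example, inserting one value's low byte into another value's bits 8..15,
+// roughly
+//     func insert(x, y uint64) uint64 { return y&^0xff00 | (x&0xff)<<8 }     // illustrative
+// should first reduce (x&0xff)<<8 to a UBFIZ and then fold the OR into a
+// single (BFI [armBFAuxInt(8, 8)] y x).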
+
+// do combined loads
+// little endian loads
+// b[0] | b[1]<<8 => load 16-bit
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
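+
+// For example, a little-endian 16-bit decode in the style of encoding/binary,
+// roughly
+//     func le16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 }     // illustrative
+// is expected to merge the two byte loads into a single MOVHUload by the
+// rules above.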
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUload [i0] {s} p mem)
+ y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx ptr idx mem)
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx2 ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUload [i0] {s} p mem)
+ y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx4 ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx ptr idx mem)
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
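+// For illustration, the 32- and 64-bit chains correspond to the generic
+// little-endian decode (cf. encoding/binary.LittleEndian.Uint64):
+//
+//    func load64(b []byte) uint64 {
+//        return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+//            uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+//    }
+//
+// which becomes a single unaligned MOVDload.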
+
+// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 32-bit
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [3] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+
+// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 64-bit
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [7] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [6] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [4] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [3] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [2] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
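+// The OR-rooted variants above catch the same byte-assembly expressions when
+// the operands associate the other way, leaving the top-level OR unshifted
+// and the most-significant byte's shift as an explicit SLLconst.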
+
+// big-endian loads
+// b[1] | b[0]<<8 => load 16-bit, reverse
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
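+// For illustration (cf. encoding/binary.BigEndian.Uint16):
+//
+//    func load16be(b []byte) uint16 {
+//        return uint16(b[1]) | uint16(b[0])<<8
+//    }
+//
+// The bytes are fetched with one little-endian MOVHUload and swapped with
+// REV16W; the wider big-endian patterns below use REVW and REV the same way.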
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit, reverse
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUload [i2] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUload [2] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit, reverse
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUload [i4] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUload [4] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [3] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit, reverse
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit, reverse
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [4] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [5] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
+
+// Combine zero stores into larger (unaligned) stores.
+(MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),1)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstorezeroidx ptr1 idx1 mem)
+(MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstorezeroidx ptr idx mem)
+(MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),2)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstorezeroidx ptr1 idx1 mem)
+(MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+(MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),4)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstorezeroidx ptr1 idx1 mem)
+(MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+(MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),8)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVQstorezero [0] {s} p0 mem)
+(MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVQstorezero [0] {s} p0 mem)
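+// For illustration (hypothetical source): adjacent byte-zeroing stores such as
+//
+//    b[0], b[1] = 0, 0
+//
+// merge into a single MOVHstorezero, and the ladder continues upward until
+// MOVQstorezero, a single 16-byte zero store.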
+
+// Combine stores into larger (unaligned) stores.
+(MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
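+// For illustration (cf. encoding/binary.LittleEndian.PutUint16):
+//
+//    func store16(b []byte, v uint16) {
+//        b[0] = byte(v)
+//        b[1] = byte(v >> 8)
+//    }
+//
+// The two byte stores merge into one unaligned MOVHstore. The UBFX and
+// MOVDreg variants above match the same high-byte extraction as it appears
+// for 32-bit and zero-extended operands.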
+(MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstoreidx ptr idx w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w0 mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w0 mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+(MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstore [i-4] {s} ptr0 w mem)
+(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstoreidx ptr1 idx1 w mem)
+(MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstoreidx ptr idx w mem)
+(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+(MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstore [i-4] {s} ptr0 w0 mem)
+(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstoreidx ptr1 idx1 w0 mem)
+(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
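+// For illustration, the ladder composes: the generic little-endian encode
+// (cf. encoding/binary.LittleEndian.PutUint32)
+//
+//    func store32(b []byte, v uint32) {
+//        b[0] = byte(v)
+//        b[1] = byte(v >> 8)
+//        b[2] = byte(v >> 16)
+//        b[3] = byte(v >> 24)
+//    }
+//
+// merges pairwise into two MOVHstores and then into a single MOVWstore.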
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w)
+ x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w)
+ x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w)
+ x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w)
+ x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+(MOVBstore [7] {s} p w
+ x0:(MOVBstore [6] {s} p (SRLconst [8] w)
+ x1:(MOVBstore [5] {s} p (SRLconst [16] w)
+ x2:(MOVBstore [4] {s} p (SRLconst [24] w)
+ x3:(MOVBstore [3] {s} p (SRLconst [32] w)
+ x4:(MOVBstore [2] {s} p (SRLconst [40] w)
+ x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w)
+ x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstoreidx ptr (ADDconst [3] idx) w
+ x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+(MOVBstoreidx ptr idx w
+ x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w))
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w))
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w))
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w))
+ x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (SRLconst [8] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w)
+ x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
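+// For illustration (cf. encoding/binary.BigEndian.PutUint32): storing the
+// most-significant byte first, as in
+//
+//    func store32be(b []byte, v uint32) {
+//        b[0] = byte(v >> 24)
+//        b[1] = byte(v >> 16)
+//        b[2] = byte(v >> 8)
+//        b[3] = byte(v)
+//    }
+//
+// matches the reversed patterns above and becomes a byte swap plus one store:
+// REVW+MOVWstore here, REV+MOVDstore for 64 bits, REV16W+MOVHstore for 16 bits.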
+
+// FP simplification: fold FNEG into FMUL/FNMUL and fuse FMUL/FNMUL with FADD/FSUB.
+(FNEGS (FMULS x y)) => (FNMULS x y)
+(FNEGD (FMULD x y)) => (FNMULD x y)
+(FMULS (FNEGS x) y) => (FNMULS x y)
+(FMULD (FNEGD x) y) => (FNMULD x y)
+(FNEGS (FNMULS x y)) => (FMULS x y)
+(FNEGD (FNMULD x y)) => (FMULD x y)
+(FNMULS (FNEGS x) y) => (FMULS x y)
+(FNMULD (FNEGD x) y) => (FMULD x y)
+(FADDS a (FMULS x y)) => (FMADDS a x y)
+(FADDD a (FMULD x y)) => (FMADDD a x y)
+(FSUBS a (FMULS x y)) => (FMSUBS a x y)
+(FSUBD a (FMULD x y)) => (FMSUBD a x y)
+(FSUBS (FMULS x y) a) => (FNMSUBS a x y)
+(FSUBD (FMULD x y) a) => (FNMSUBD a x y)
+(FADDS a (FNMULS x y)) => (FMSUBS a x y)
+(FADDD a (FNMULD x y)) => (FMSUBD a x y)
+(FSUBS a (FNMULS x y)) => (FMADDS a x y)
+(FSUBD a (FNMULD x y)) => (FMADDD a x y)
+(FSUBS (FNMULS x y) a) => (FNMADDS a x y)
+(FSUBD (FNMULD x y) a) => (FNMADDD a x y)
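+// For illustration: these rules fold a negate into the multiply and fuse a
+// multiply with a following add or subtract, so
+//
+//    func axpy(a, x, y float64) float64 {
+//        return a + x*y
+//    }
+//
+// compiles to a single FMADDD, computing the product and sum with one rounding.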
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
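+// For illustration: these fold loads from read-only symbols (e.g. a string
+// literal's backing data) at constant offsets into constants; read8 reads the
+// symbol's contents at compile time, and read16/read32/read64 do the same
+// honoring the target byte order.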