author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 13:16:40 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 13:16:40 +0000
commit    | 47ab3d4a42e9ab51c465c4322d2ec233f6324e6b (patch)
tree      | a61a0ffd83f4a3def4b36e5c8e99630c559aa723 /src/cmd/compile/internal/ssa/gen/AMD64.rules
parent    | Initial commit. (diff)
Adding upstream version 1.18.10.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cmd/compile/internal/ssa/gen/AMD64.rules')
-rw-r--r-- | src/cmd/compile/internal/ssa/gen/AMD64.rules | 2247 |
1 file changed, 2247 insertions, 0 deletions
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules new file mode 100644 index 0000000..47a6af0 --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -0,0 +1,2247 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...) +(AddPtr ...) => (ADDQ ...) +(Add(32|64)F ...) => (ADDS(S|D) ...) + +(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...) +(SubPtr ...) => (SUBQ ...) +(Sub(32|64)F ...) => (SUBS(S|D) ...) + +(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...) +(Mul(32|64)F ...) => (MULS(S|D) ...) + +(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y)) +(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y)) +(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y))) + +(Hmul(64|32) ...) => (HMUL(Q|L) ...) +(Hmul(64|32)u ...) => (HMUL(Q|L)U ...) + +(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y)) +(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) +(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y)) +(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) +(Div(32|64)F ...) => (DIVS(S|D) ...) + +(Select0 (Add64carry x y c)) => + (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c)))) +(Select1 (Add64carry x y c)) => + (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c)))))) +(Select0 (Sub64borrow x y c)) => + (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c)))) +(Select1 (Sub64borrow x y c)) => + (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c)))))) + +// Optimize ADCQ and friends +(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry) +(ADCQ x y (FlagEQ)) => (ADDQcarry x y) +(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c]) +(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)]) +(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow) +(SBBQ x y (FlagEQ)) => (SUBQborrow x y) +(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c]) +(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)]) +(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ) +(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x + + +(Mul64uhilo ...) => (MULQU2 ...) +(Div128u ...) => (DIVQU2 ...) + +(Avg64u ...) => (AVGQU ...) + +(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y)) +(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) +(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y)) +(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + +(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...) +(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...) +(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...) +(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...) + +(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...) +(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))])) +(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)])) + +// Lowering boolean ops +(AndB ...) => (ANDL ...) +(OrB ...) => (ORL ...) 
+(Not x) => (XORLconst [1] x) + +// Lowering pointer arithmetic +(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr) + +// Lowering other arithmetic +(Ctz64 x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x) +(Ctz32 x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz64 <t> x) && buildcfg.GOAMD64 < 3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) +(Ctz32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x))) +(Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x)) +(Ctz8 x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x)) + +(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x) +(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz8NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz64NonZero x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ x)) +(Ctz32NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x) +(Ctz16NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x) +(Ctz8NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x) + +// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0. +// However, for zero-extended values, we can cheat a bit, and calculate +// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently +// places the index of the highest set bit where we want it. +(BitLen64 <t> x) => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x)))) +(BitLen32 x) => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x)))) +(BitLen16 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x))) +(BitLen8 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x))) + +(Bswap(64|32) ...) => (BSWAP(Q|L) ...) + +(PopCount(64|32) ...) => (POPCNT(Q|L) ...) +(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x)) +(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x)) + +(Sqrt ...) => (SQRTSD ...) +(Sqrt32 ...) => (SQRTSS ...) + +(RoundToEven x) => (ROUNDSD [0] x) +(Floor x) => (ROUNDSD [1] x) +(Ceil x) => (ROUNDSD [2] x) +(Trunc x) => (ROUNDSD [3] x) + +(FMA x y z) => (VFMADD231SD z x y) + +// Lowering extension +// Note: we always extend to 64 bits even though some ops don't need that many result bits. +(SignExt8to16 ...) => (MOVBQSX ...) +(SignExt8to32 ...) => (MOVBQSX ...) +(SignExt8to64 ...) => (MOVBQSX ...) +(SignExt16to32 ...) => (MOVWQSX ...) +(SignExt16to64 ...) => (MOVWQSX ...) +(SignExt32to64 ...) => (MOVLQSX ...) + +(ZeroExt8to16 ...) => (MOVBQZX ...) +(ZeroExt8to32 ...) => (MOVBQZX ...) +(ZeroExt8to64 ...) => (MOVBQZX ...) +(ZeroExt16to32 ...) => (MOVWQZX ...) +(ZeroExt16to64 ...) => (MOVWQZX ...) +(ZeroExt32to64 ...) => (MOVLQZX ...) + +(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63]) + +(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) +(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) + +// Lowering truncation +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Lowering float <-> int +(Cvt32to32F ...) => (CVTSL2SS ...) +(Cvt32to64F ...) => (CVTSL2SD ...) +(Cvt64to32F ...) => (CVTSQ2SS ...) +(Cvt64to64F ...) => (CVTSQ2SD ...) + +(Cvt32Fto32 ...) => (CVTTSS2SL ...) 
+(Cvt32Fto64 ...) => (CVTTSS2SQ ...) +(Cvt64Fto32 ...) => (CVTTSD2SL ...) +(Cvt64Fto64 ...) => (CVTTSD2SQ ...) + +(Cvt32Fto64F ...) => (CVTSS2SD ...) +(Cvt64Fto32F ...) => (CVTSD2SS ...) + +(Round(32|64)F ...) => (Copy ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +// Lowering shifts +// Unsigned shifts need to return 0 if shift amount is >= width of shifted value. +// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff) +(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64]))) +(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32]))) +(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32]))) +(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32]))) + +(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y) +(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) +(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) +(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) + +(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64]))) +(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32]))) +(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16]))) +(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8]))) + +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y) +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y) + +// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. +// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width. 
+(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64]))))) +(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32]))))) +(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16]))))) +(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8]))))) + +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y) +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y) + +// Lowering integer comparisons +(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y)) +(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y)) +(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y)) +(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y)) +(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y)) +(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y)) + +// Lowering floating point comparisons +// Note Go assembler gets UCOMISx operand order wrong, but it is right here +// and the operands are reversed when generating assembly language. +(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y)) +(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y)) +// Use SETGF/SETGEF with reversed operands to dodge NaN case. +(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x)) +(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x)) + +// Lowering loads +(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem) +(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem) +(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem) +(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem) +(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem) +(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem) + +// Lowering stores +// These more-specific FP versions of Store pattern should come first. 
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem) + +(Store {t} ptr val mem) && t.Size() == 8 => (MOVQstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) + +// Lowering moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem) +(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem) +(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem) +(Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem) +(Move [16] dst src mem) && !config.useSSE => + (MOVQstore [8] dst (MOVQload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) + +(Move [32] dst src mem) => + (Move [16] + (OffPtr <dst.Type> dst [16]) + (OffPtr <src.Type> src [16]) + (Move [16] dst src mem)) + +(Move [48] dst src mem) && config.useSSE => + (Move [32] + (OffPtr <dst.Type> dst [16]) + (OffPtr <src.Type> src [16]) + (Move [16] dst src mem)) + +(Move [64] dst src mem) && config.useSSE => + (Move [32] + (OffPtr <dst.Type> dst [32]) + (OffPtr <src.Type> src [32]) + (Move [32] dst src mem)) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [5] dst src mem) => + (MOVBstore [4] dst (MOVBload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [6] dst src mem) => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [7] dst src mem) => + (MOVLstore [3] dst (MOVLload [3] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [9] dst src mem) => + (MOVBstore [8] dst (MOVBload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [10] dst src mem) => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [12] dst src mem) => + (MOVLstore [8] dst (MOVLload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 => + (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) + (MOVQstore dst (MOVQload src mem) mem)) + +// Adjust moves to be a multiple of 16 bytes. +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 <= 8 => + (Move [s-s%16] + (OffPtr <dst.Type> dst [s%16]) + (OffPtr <src.Type> src [s%16]) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE => + (Move [s-s%16] + (OffPtr <dst.Type> dst [s%16]) + (OffPtr <src.Type> src [s%16]) + (MOVOstore dst (MOVOload src mem) mem)) +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE => + (Move [s-s%16] + (OffPtr <dst.Type> dst [s%16]) + (OffPtr <src.Type> src [s%16]) + (MOVQstore [8] dst (MOVQload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem))) + +// Medium copying uses a duff device. +(Move [s] dst src mem) + && s > 64 && s <= 16*64 && s%16 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [s] dst src mem) + +// Large copying uses REP MOVSQ. 
+(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) => + (REPMOVSQ dst src (MOVQconst [s/8]) mem) + +// Lowering Zero instructions +(Zero [0] _ mem) => mem +(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem) + +(Zero [3] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [5] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [6] destptr mem) => + (MOVWstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [7] destptr mem) => + (MOVLstoreconst [makeValAndOff(0,3)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Strip off any fractional word zeroing. +(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE => + (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Zero small numbers of words directly. +(Zero [16] destptr mem) && !config.useSSE => + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [24] destptr mem) && !config.useSSE => + (MOVQstoreconst [makeValAndOff(0,16)] destptr + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))) +(Zero [32] destptr mem) && !config.useSSE => + (MOVQstoreconst [makeValAndOff(0,24)] destptr + (MOVQstoreconst [makeValAndOff(0,16)] destptr + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))) + +(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE => + (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Adjust zeros to be a multiple of 16 bytes. +(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE => + (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE => + (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [16] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [32] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,16)] destptr + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [48] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,32)] destptr + (MOVOstoreconst [makeValAndOff(0,16)] destptr + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))) +(Zero [64] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,48)] destptr + (MOVOstoreconst [makeValAndOff(0,32)] destptr + (MOVOstoreconst [makeValAndOff(0,16)] destptr + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))) + +// Medium zeroing uses a duff device. +(Zero [s] destptr mem) + && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice => + (DUFFZERO [s] destptr mem) + +// Large zeroing uses REP STOSQ. 
+(Zero [s] destptr mem) + && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) + && s%8 == 0 => + (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) + +// Lowering constants +(Const8 [c]) => (MOVLconst [int32(c)]) +(Const16 [c]) => (MOVLconst [int32(c)]) +(Const32 ...) => (MOVLconst ...) +(Const64 ...) => (MOVQconst ...) +(Const32F ...) => (MOVSSconst ...) +(Const64F ...) => (MOVSDconst ...) +(ConstNil ) => (MOVQconst [0]) +(ConstBool [c]) => (MOVLconst [b2i32(c)]) + +// Lowering calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// Lowering conditional moves +// If the condition is a SETxx, we can just run a CMOV from the comparison that was +// setting the flags. +// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL +(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t)) + => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) +(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t) + => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) +(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t) + => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) + +// If the condition does not set the flags, we need to generate a comparison. +(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 + => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check)) +(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 + => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check)) +(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 + => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check)) + +(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + => (CMOVQNE y x (CMPQconst [0] check)) +(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + => (CMOVLNE y x (CMPQconst [0] check)) +(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + => (CMOVWNE y x (CMPQconst [0] check)) + +// Absorb InvertFlags +(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) + => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) +(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) + => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) +(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) + => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) + +// Absorb constants generated during lower +(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x +(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y +(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x +(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y +(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x +(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y +(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x +(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y +(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x 
+(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y + +// Miscellaneous +(IsNonNil p) => (SETNE (TESTQ p p)) +(IsInBounds idx len) => (SETB (CMPQ idx len)) +(IsSliceInBounds idx len) => (SETBE (CMPQ idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register. +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) + +(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) +(Addr {sym} base) => (LEAQ {sym} base) +(LocalAddr {sym} base _) => (LEAQ {sym} base) + +(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem) + +// block rewrites +(If (SETL cmp) yes no) => (LT cmp yes no) +(If (SETLE cmp) yes no) => (LE cmp yes no) +(If (SETG cmp) yes no) => (GT cmp yes no) +(If (SETGE cmp) yes no) => (GE cmp yes no) +(If (SETEQ cmp) yes no) => (EQ cmp yes no) +(If (SETNE cmp) yes no) => (NE cmp yes no) +(If (SETB cmp) yes no) => (ULT cmp yes no) +(If (SETBE cmp) yes no) => (ULE cmp yes no) +(If (SETA cmp) yes no) => (UGT cmp yes no) +(If (SETAE cmp) yes no) => (UGE cmp yes no) +(If (SETO cmp) yes no) => (OS cmp yes no) + +// Special case for floating point - LF/LEF not generated +(If (SETGF cmp) yes no) => (UGT cmp yes no) +(If (SETGEF cmp) yes no) => (UGE cmp yes no) +(If (SETEQF cmp) yes no) => (EQF cmp yes no) +(If (SETNEF cmp) yes no) => (NEF cmp yes no) + +(If cond yes no) => (NE (TESTB cond cond) yes no) + +// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here. +(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem) +(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem) +(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem) +(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem) + +// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load. +// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those? +(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem)) +(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem)) +(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem)) +(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) + +// Atomic exchanges. 
+(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem) +(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem) + +// Atomic adds. +(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem)) +(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem)) +(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple)) +(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple) +(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple)) +(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple) + +// Atomic compare and swap. +(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem) +(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem) + +// Atomic memory updates. +(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem) +(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem) +(AtomicOr8 ptr val mem) => (ORBlock ptr val mem) +(AtomicOr32 ptr val mem) => (ORLlock ptr val mem) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// *************************** +// Above: lowering rules +// Below: optimizations +// *************************** +// TODO: Should the optimizations be a separate pass? + +// Fold boolean tests into blocks +(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no) +(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no) +(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no) +(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no) +(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no) +(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no) +(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no) +(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no) +(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no) + +// Unsigned comparisons to 0/1 +(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes) +(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no) +(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false]) +(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true]) + +// x & 1 != 0 -> x & 1 +(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x) +(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x) + +// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded +// Note that BTx instructions use the carry bit, so we need to convert tests for zero flag +// into tests for carry flags. +// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis +// mutandis, for UGE and SETAE, and CC and SETCC. 
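+// For example, a test like a&(1<<b) != 0 first lowers to a TESTQ of
+// (SHLQ (MOVQconst [1]) b) against a; the rules below rewrite that into
+// (BTQ b a) and then branch or SET on the carry flag (ULT/SETB) rather than
+// the zero flag.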
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y)) +((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y)) +((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c)) + => ((ULT|UGE) (BTLconst [int8(log32(c))] x)) +((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c)) + => ((ULT|UGE) (BTQconst [int8(log32(c))] x)) +((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) + => ((ULT|UGE) (BTQconst [int8(log64(c))] x)) +(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y)) +(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y)) +(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c)) + => (SET(B|AE) (BTLconst [int8(log32(c))] x)) +(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c)) + => (SET(B|AE) (BTQconst [int8(log32(c))] x)) +(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) + => (SET(B|AE) (BTQconst [int8(log64(c))] x)) +// SET..store variant +(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c)) + => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c)) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem) + +// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules +// and further combining shifts. +(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x) +(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x) +(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x) +(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x) +(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x) +(BTLconst [0] s:(SHRL x y)) => (BTL y x) + +// Rewrite a & 1 != 1 into a & 1 == 0. +// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test. +(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s)) +(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem) +(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s)) +(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem) + +// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b) +(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y) +(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y) + +// Convert ORconst into BTS, if the code gets smaller, with boundary being +// (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes). 
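+// (An OR/XOR immediate up to 127 fits in a sign-extended 8-bit field, so the
+// plain instruction stays short; from 128 upward a full 32-bit immediate is
+// needed, which is why the rules below require uint64(c) >= 128 before
+// switching to BTS/BTC with a one-byte bit index.)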
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128 + => (BT(S|C)Qconst [int8(log32(c))] x) +((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + => (BT(S|C)Lconst [int8(log32(c))] x) +((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 + => (BT(S|C)Qconst [int8(log64(c))] x) +((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + => (BT(S|C)Lconst [int8(log32(c))] x) + +// Recognize bit clearing: a &^= 1<<b +(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y) +(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y) +(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + => (BTRQconst [int8(log32(^c))] x) +(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + => (BTRLconst [int8(log32(^c))] x) +(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 + => (BTRQconst [int8(log64(^c))] x) +(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + => (BTRLconst [int8(log32(^c))] x) + +// Special-case bit patterns on first/last bit. +// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts, +// for instance: +// x & 0xFFFF0000 -> (x >> 16) << 16 +// x & 0x80000000 -> (x >> 31) << 31 +// +// In case the mask is just one bit (like second example above), it conflicts +// with the above rules to detect bit-testing / bit-clearing of first/last bit. +// We thus special-case them, by detecting the shift patterns. + +// Special case resetting first/last bit +(SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) + => (BTR(L|Q)const [0] x) +(SHRLconst [1] (SHLLconst [1] x)) + => (BTRLconst [31] x) +(SHRQconst [1] (SHLQconst [1] x)) + => (BTRQconst [63] x) + +// Special case testing first/last bit (with double-shift generated by generic.rules) +((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) +((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) + +((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [0] x)) +((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTLconst [0] x)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem) + +// Special-case manually testing last bit with "a>>63 != 0" (without "&1") +((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) +((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && 
z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) + +// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1) +(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x) +(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x) +(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x) +(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x) + +// Fold boolean negation into SETcc. +(XORLconst [1] (SETNE x)) => (SETEQ x) +(XORLconst [1] (SETEQ x)) => (SETNE x) +(XORLconst [1] (SETL x)) => (SETGE x) +(XORLconst [1] (SETGE x)) => (SETL x) +(XORLconst [1] (SETLE x)) => (SETG x) +(XORLconst [1] (SETG x)) => (SETLE x) +(XORLconst [1] (SETB x)) => (SETAE x) +(XORLconst [1] (SETAE x)) => (SETB x) +(XORLconst [1] (SETBE x)) => (SETA x) +(XORLconst [1] (SETA x)) => (SETBE x) + +// Special case for floating point - LF/LEF not generated +(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no) +(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no) + +// Disabled because it interferes with the pattern match above and makes worse code. +// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x)) +// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x)) + +// fold constants into instructions +(ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x) +(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x) +(ADDL x (MOVLconst [c])) => (ADDLconst [c] x) + +(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)]) +(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)])) +(SUBL x (MOVLconst [c])) => (SUBLconst x [c]) +(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c])) + +(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x) +(MULL x (MOVLconst [c])) => (MULLconst [c] x) + +(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x) +(ANDL x (MOVLconst [c])) => (ANDLconst [c] x) + +(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x) +(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x) +(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x) + +(BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x) +(ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x) +(BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x) + +(BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x) +(XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x) +(BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x) + +(BTSLconst [c] (ORLconst [d] x)) => (ORLconst [d | 1<<uint32(c)] x) +(ORLconst [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x) +(BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x) + +(BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c))) => (ANDQconst [d &^ (1<<uint32(c))] x) +(ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d))) => (ANDQconst [c &^ (1<<uint32(d))] x) +(BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x) + +(BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c)) => (XORQconst [d ^ 1<<uint32(c)] x) +(XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d)) => (XORQconst [c ^ 
1<<uint32(d)] x) +(BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x) + +(BTSQconst [c] (ORQconst [d] x)) && is32Bit(int64(d) | 1<<uint32(c)) => (ORQconst [d | 1<<uint32(c)] x) +(ORQconst [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d)) => (ORQconst [c | 1<<uint32(d)] x) +(BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x) + + +(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x) +(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x) + +(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x) +(ORQ x (MOVLconst [c])) => (ORQconst [c] x) +(ORL x (MOVLconst [c])) => (ORLconst [c] x) + +(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x) +(XORL x (MOVLconst [c])) => (XORLconst [c] x) + +(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x) +(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x) + +(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x) +(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x) +(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x) +(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0]) +(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x) +(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0]) + +(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x) +(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x) +(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x) +(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x) + + +// Operations which don't affect the low 6/5 bits of the shift amount are NOPs. +((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y)) +((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y)) + +((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ <t> y)) +((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y)) + +((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y)) +((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y)) + +((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL <t> y)) +((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y)) + +// Constant rotate instructions +((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c]) +((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c]) + +((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && 
t.Size() == 2 => (ROLWconst x [c]) +((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 => (ROLBconst x [c]) + +(ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x) +(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x) +(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x) +(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x) + +(RotateLeft8 ...) => (ROLB ...) +(RotateLeft16 ...) => (ROLW ...) +(RotateLeft32 ...) => (ROLL ...) +(RotateLeft64 ...) => (ROLQ ...) + +// Non-constant rotates. +// We want to issue a rotate when the Go source contains code like +// y &= 63 +// x << y | x >> (64-y) +// The shift rules above convert << to SHLx and >> to SHRx. +// SHRx converts its shift argument from 64-y to -y. +// A tricky situation occurs when y==0. Then the original code would be: +// x << 0 | x >> 64 +// But x >> 64 is 0, not x. So there's an additional mask that is ANDed in +// to force the second term to 0. We don't need that mask, but we must match +// it in order to strip it out. +(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y) +(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y) + +(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y) +(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y) + +// Help with rotate detection +(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT) +(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) => (FlagLT_ULT) + +(ORL (SHLL x (AND(Q|L)const y [15])) + (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))) + (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16])))) + && v.Type.Size() == 2 + => (ROLW x y) +(ORL (SHRW x (AND(Q|L)const y [15])) + (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))) + && v.Type.Size() == 2 + => (RORW x y) + +(ORL (SHLL x (AND(Q|L)const y [ 7])) + (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))) + (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8])))) + && v.Type.Size() == 1 + => (ROLB x y) +(ORL (SHRB x (AND(Q|L)const y [ 7])) + (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))) + && v.Type.Size() == 1 + => (RORB x y) + +// rotate left negative = rotate right +(ROLQ x (NEG(Q|L) y)) => (RORQ x y) +(ROLL x (NEG(Q|L) y)) => (RORL x y) +(ROLW x (NEG(Q|L) y)) => (RORW x y) +(ROLB x (NEG(Q|L) y)) => (RORB x y) + +// rotate right negative = rotate left +(RORQ x (NEG(Q|L) y)) => (ROLQ x y) +(RORL x (NEG(Q|L) y)) => (ROLL x y) +(RORW x (NEG(Q|L) y)) => (ROLW x y) +(RORB x (NEG(Q|L) y)) => (ROLB x y) + +// rotate by constants +(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x) +(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x) +(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x) +(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x) + +(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x) +(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x) +(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x) +(RORB x (MOV(Q|L)const [c])) => (ROLBconst 
[int8((-c)&7) ] x) + +// Constant shift simplifications +((SHLQ|SHRQ|SARQ)const x [0]) => x +((SHLL|SHRL|SARL)const x [0]) => x +((SHRW|SARW)const x [0]) => x +((SHRB|SARB)const x [0]) => x +((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x + +// Multi-register shifts +(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits) + +// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) +// because the x86 instructions are defined to use all 5 bits of the shift even +// for the small shifts. I don't think we'll ever generate a weird shift (e.g. +// (SHRW x (MOVLconst [24])), but just in case. + +(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)]) +(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)])) +(CMPL x (MOVLconst [c])) => (CMPLconst x [c]) +(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c])) +(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)]) +(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)])) +(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)]) +(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)])) + +// Canonicalize the order of arguments to comparisons - helps with CSE. +(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x)) + +// Using MOVZX instead of AND is cheaper. +(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x) +(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x) +// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32. +// Commenting out for now, because it also can't trigger because of the is32bit guard on the +// ANDQconst lowering-rule, above, prevents 0xFFFFFFFF from matching (for the same reason) +// Using an alternate form of this rule segfaults some binaries because of +// adverse interactions with other passes. +// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x) + +// strength reduction +// Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf: +// 1 - addq, shlq, leaq, negq, subq +// 3 - imulq +// This limits the rewrites to two instructions. +// Note that negq always operates in-place, +// which can require a register-register move +// to preserve the original value, +// so it must be used with care. 
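+// A worked example of the rules below: x*11 becomes (LEAQ2 x (LEAQ4 x x)),
+// where the inner LEA computes x + 4*x = 5*x and the outer one x + 2*(5*x) = 11*x,
+// i.e. two cost-1 LEAs in place of one cost-3 IMULQ.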
+(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x)) +(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x)) +(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x)) +(MUL(Q|L)const [-1] x) => (NEG(Q|L) x) +(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0]) +(MUL(Q|L)const [ 1] x) => x +(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x) +(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x) +(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 <v.Type> x x)) +(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x) +(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 <v.Type> x x)) +(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 <v.Type> x x)) +(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 <v.Type> x x)) +(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 <v.Type> x x)) +(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 <v.Type> x x)) +(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 <v.Type> x x) (LEA(Q|L)2 <v.Type> x x)) +(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 <v.Type> x x)) +(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 <v.Type> x x)) +(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 <v.Type> x x) (LEA(Q|L)4 <v.Type> x x)) +(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x)) +(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x)) + +(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const <v.Type> [int8(log32(c-8))] x) x) +(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 <v.Type> x x)) +(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 <v.Type> x x)) +(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 <v.Type> x x)) + +// combine add/shift into LEAQ/LEAL +(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y) +(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y) +(ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y) +(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y) +(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x) + +// combine ADDQ/ADDQconst into LEAQ1/LEAL1 +(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y) +(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y) +(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x) + +// fold ADDQ/ADDL into LEAQ/LEAL +(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x) +(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x) +(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y) +(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y) + +// fold ADDQconst/ADDLconst into LEAQx/LEALx +(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) 
=> (LEA(Q|L)4 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y) +(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y) +(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y) +(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y) +(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y) +(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y) +(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y) +(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y) + +// fold shifts into LEAQx/LEALx +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y) +(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y) + +// reverse ordering of compare instruction +(SETL (InvertFlags x)) => (SETG x) +(SETG (InvertFlags x)) => (SETL x) +(SETB (InvertFlags x)) => (SETA x) +(SETA (InvertFlags x)) => (SETB x) +(SETLE (InvertFlags x)) => (SETGE x) +(SETGE (InvertFlags x)) => (SETLE x) +(SETBE (InvertFlags x)) => (SETAE x) +(SETAE (InvertFlags x)) => (SETBE x) +(SETEQ (InvertFlags x)) => (SETEQ x) +(SETNE (InvertFlags x)) => (SETNE x) + +(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem) +(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem) +(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem) +(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem) +(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem) +(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem) +(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem) +(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem) +(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem) +(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem) + +// sign extended loads +// Note: The combined instruction must end up in the same block +// as the original load. If not, we end up making a value with +// memory type live in two different blocks, which can lead to +// multiple memory values alive simultaneously. +// Make sure we don't combine these ops if the load has another use. +// This prevents a single load from being split into multiple loads +// which then might return different values. See test/atomicload.go. 
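+// For example, (MOVBQSX x:(MOVBload ...)) is folded into a single MOVBQSXload
+// only when x.Uses == 1; with a second use the load would be duplicated, and a
+// concurrent writer could make the two copies observe different values.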
+(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) +(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) +(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) +(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) +(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) +(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) +(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) +(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) +(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) +(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) +(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) +(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) +(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) +(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) +(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) +(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) +(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) +(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) + +(MOVLQZX x) && zeroUpper32Bits(x,3) => x +(MOVWQZX x) && zeroUpper48Bits(x,3) => x +(MOVBQZX x) && zeroUpper56Bits(x,3) => x + +// replace load from same location as preceding store with zero/sign extension (or copy in case of full width) +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x) +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x) +(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x) +(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x) +(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x) +(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x) + +// Fold extensions and ANDs together. 
+(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x) +(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x) +(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x) +(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x) +(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x) +(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x) + +// Don't extend before storing +(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem) + +// fold constants into memory operations +// Note that this is not always a good idea because if not all the uses of +// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now +// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one. +// Nevertheless, let's do it! +(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem) +(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem) +(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem) +((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem) +((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem) +(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem) +(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + +((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem) +((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem) +((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Lmodify 
[off1+off2] {sym} base val mem) + +// Fold constants into stores. +(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) => + (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) +(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) + +// Fold address offsets into constant stores. +(MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) => + (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + +// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows +// what variables are being read/written by the ops. +(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) => + (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) +(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem) +((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => + (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + +((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => + ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + && ValAndOff(valoff1).canAdd32(off2) && 
canMergeSym(sym1, sym2) => + ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) +((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + +// fold LEAQs together +(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) + +// LEAQ into LEAQ1 +(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ1 into LEAQ +(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ into LEAQ[248] +(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ[248] into LEAQ +(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible. +(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y) +(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x) +(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil => + (LEAQ4 [off1+2*off2] {sym1} x y) +(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil => + (LEAQ8 [off1+4*off2] {sym1} x y) +// TODO: more? + +// Lower LEAQ2/4/8 when the offset is a constant +(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) => + (LEAQ [off+int32(scale)*2] {sym} x) +(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) => + (LEAQ [off+int32(scale)*4] {sym} x) +(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) => + (LEAQ [off+int32(scale)*8] {sym} x) + +// Absorb InvertFlags into branches. 
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no) +(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) => (NE cmp yes no) + +// Constant comparisons. +(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ) +(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT) +(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT) +(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT) +(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ) +(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT) + +// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts. +// In theory this applies to any of the simplifications above, +// but CMPQ is the only one I've actually seen occur. +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT) + +// Other known comparisons. +(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT) +(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT) +(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT) +(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT) +(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT) +(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT) + +// TESTQ c c sets flags like CMPQ c 0. 
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ) +(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT) +(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT) +(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT) + +// TODO: DIVxU also. + +// Absorb flag constants into SBB ops. +(SBBQcarrymask (FlagEQ)) => (MOVQconst [0]) +(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1]) +(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0]) +(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1]) +(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0]) +(SBBLcarrymask (FlagEQ)) => (MOVLconst [0]) +(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0]) +(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0]) + +// Absorb flag constants into branches. +((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no) +((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes) +((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no) +((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes) +((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no) +((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes) +((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no) +((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes) +((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no) +((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes) + +// Absorb flag constants into SETxx ops. +((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1]) +((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0]) +((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1]) +((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0]) +((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1]) +((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0]) +((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1]) +((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0]) +((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1]) +((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0]) + +(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) + +(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) + +(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr 
(MOVLconst <typ.UInt8> [1]) mem) +(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) + +(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) + +(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) + +(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) + +(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) + +(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) + +(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETAstore [off] {sym} ptr 
(FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) + +(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) +(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) +(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) + +// Remove redundant *const ops +(ADDQconst [0] x) => x +(ADDLconst [c] x) && c==0 => x +(SUBQconst [0] x) => x +(SUBLconst [c] x) && c==0 => x +(ANDQconst [0] _) => (MOVQconst [0]) +(ANDLconst [c] _) && c==0 => (MOVLconst [0]) +(ANDQconst [-1] x) => x +(ANDLconst [c] x) && c==-1 => x +(ORQconst [0] x) => x +(ORLconst [c] x) && c==0 => x +(ORQconst [-1] _) => (MOVQconst [-1]) +(ORLconst [c] _) && c==-1 => (MOVLconst [-1]) +(XORQconst [0] x) => x +(XORLconst [c] x) && c==0 => x +// TODO: since we got rid of the W/B versions, we might miss +// things like (ANDLconst [0x100] x) which were formerly +// (ANDBconst [0] x). Probably doesn't happen very often. +// If we cared, we might do: +// (ANDLconst <t> [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0]) + +// Remove redundant ops +// Not in generic rules, because they may appear after lowering e. g. Slicemask +(NEG(Q|L) (NEG(Q|L) x)) => x +(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x) + +// Convert constant subtracts to constant adds +(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x) +(SUBLconst [c] x) => (ADDLconst [-c] x) + +// generic constant folding +// TODO: more of this +(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d]) +(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d]) +(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x) +(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x) +(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)]) +(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x) +(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)]) +(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)]) +(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)]) +(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)]) +(NEGQ (MOVQconst [c])) => (MOVQconst [-c]) +(NEGL (MOVLconst [c])) => (MOVLconst [-c]) +(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d]) +(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d]) +(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d]) +(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d]) +(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d]) +(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d]) +(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d]) +(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d]) +(NOTQ (MOVQconst [c])) => (MOVQconst [^c]) +(NOTL (MOVLconst [c])) => (MOVLconst [^c]) +(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))]) +(BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))]) +(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))]) +(BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))]) +(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))]) +(BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))]) + +// If c 
or d doesn't fit into 32 bits, then we can't construct ORQconst, +// but we can still constant-fold. +// In theory this applies to any of the simplifications above, +// but ORQ is the only one I've actually seen occur. +(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d]) + +// generic simplifications +// TODO: more of this +(ADDQ x (NEGQ y)) => (SUBQ x y) +(ADDL x (NEGL y)) => (SUBL x y) +(SUBQ x x) => (MOVQconst [0]) +(SUBL x x) => (MOVLconst [0]) +(ANDQ x x) => x +(ANDL x x) => x +(ORQ x x) => x +(ORL x x) => x +(XORQ x x) => (MOVQconst [0]) +(XORL x x) => (MOVLconst [0]) + +(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)]) +(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)]) +(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)]) + +// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range. +(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x) +(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x) + +// checking AND against 0. +(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y) +(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y) +(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y) +(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y) +(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x) +(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x) +(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x) +(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x) + +// Convert TESTx to TESTxconst if possible. +(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x) +(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x) +(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x) +(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x) + +// TEST %reg,%reg is shorter than CMP +(CMPQconst x [0]) => (TESTQ x x) +(CMPLconst x [0]) => (TESTL x x) +(CMPWconst x [0]) => (TESTW x x) +(CMPBconst x [0]) => (TESTB x x) +(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x) +(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x) +(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x) +(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x) + +// Convert LEAQ1 back to ADDQ if we can +(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y) + +// Combining byte loads into larger (unaligned) loads. +// There are many ways these combinations could occur. This is +// designed to match the way encoding/binary.LittleEndian does it. 
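To make the following group concrete, here is a hypothetical Go helper (the name is invented; the body mirrors the shape of encoding/binary.LittleEndian.Uint32) that produces the OR-of-shifted-byte-loads pattern these rules are written to recognize and collapse into one wider unaligned load:

    package main

    import "fmt"

    // load32 assembles a uint32 from four consecutive bytes, least significant
    // byte first. The ORs of shifted byte loads below are the pattern the
    // little-endian load-combining rules can merge into a single MOVLload.
    func load32(b []byte) uint32 {
        _ = b[3] // single bounds check up front, as encoding/binary does
        return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
    }

    func main() {
        fmt.Printf("%#x\n", load32([]byte{0x78, 0x56, 0x34, 0x12})) // 0x12345678
    }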
+ +// Little-endian loads + +(OR(L|Q) x0:(MOVBload [i0] {s} p mem) + sh:(SHL(L|Q)const [8] x1:(MOVBload [i1] {s} p mem))) + && i1 == i0+1 + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) + +(OR(L|Q) x0:(MOVBload [i] {s} p0 mem) + sh:(SHL(L|Q)const [8] x1:(MOVBload [i] {s} p1 mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) + +(OR(L|Q) x0:(MOVWload [i0] {s} p mem) + sh:(SHL(L|Q)const [16] x1:(MOVWload [i1] {s} p mem))) + && i1 == i0+2 + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) + +(OR(L|Q) x0:(MOVWload [i] {s} p0 mem) + sh:(SHL(L|Q)const [16] x1:(MOVWload [i] {s} p1 mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p0, p1, 2) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) + +(ORQ x0:(MOVLload [i0] {s} p mem) + sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) + && i1 == i0+4 + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) + +(ORQ x0:(MOVLload [i] {s} p0 mem) + sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p0, p1, 4) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem) + +(OR(L|Q) + s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem)) + or:(OR(L|Q) + s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem)) + y)) + && i1 == i0+1 + && j1 == j0+8 + && j0 % 16 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) + +(OR(L|Q) + s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem)) + or:(OR(L|Q) + s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem)) + y)) + && j1 == j0+8 + && j0 % 16 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y) + +(ORQ + s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) + or:(ORQ + s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) + y)) + && i1 == i0+2 + && j1 == j0+16 + && j0 % 32 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) + +(ORQ + s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) + or:(ORQ + s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem)) + y)) + && j1 == j0+16 + && j0 % 32 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && sequentialAddresses(p0, p1, 2) + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y) + +// Big-endian 
loads + +(OR(L|Q) + x1:(MOVBload [i1] {s} p mem) + sh:(SHL(L|Q)const [8] x0:(MOVBload [i0] {s} p mem))) + && i1 == i0+1 + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) + +(OR(L|Q) + x1:(MOVBload [i] {s} p1 mem) + sh:(SHL(L|Q)const [8] x0:(MOVBload [i] {s} p0 mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem)) + +(OR(L|Q) + r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) + sh:(SHL(L|Q)const [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) + && i1 == i0+2 + && x0.Uses == 1 + && x1.Uses == 1 + && r0.Uses == 1 + && r1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, r0, r1, sh) + => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) + +(OR(L|Q) + r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) + sh:(SHL(L|Q)const [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))) + && x0.Uses == 1 + && x1.Uses == 1 + && r0.Uses == 1 + && r1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p0, p1, 2) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, r0, r1, sh) + => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem)) + +(ORQ + r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) + sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) + && i1 == i0+4 + && x0.Uses == 1 + && x1.Uses == 1 + && r0.Uses == 1 + && r1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, r0, r1, sh) + => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) + +(ORQ + r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) + sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem)))) + && x0.Uses == 1 + && x1.Uses == 1 + && r0.Uses == 1 + && r1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p0, p1, 4) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, r0, r1, sh) + => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem)) + +(OR(L|Q) + s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem)) + or:(OR(L|Q) + s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem)) + y)) + && i1 == i0+1 + && j1 == j0-8 + && j1 % 16 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) + +(OR(L|Q) + s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem)) + or:(OR(L|Q) + s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem)) + y)) + && j1 == j0-8 + && j1 % 16 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y) + +(ORQ + s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) + or:(ORQ + s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) + y)) + && i1 == i0+2 + && j1 == j0-16 + && j1 % 32 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && r0.Uses == 1 + && r1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, r0, r1, s0, s1, or) + => 
@mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) + +(ORQ + s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) + or:(ORQ + s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))) + y)) + && j1 == j0-16 + && j1 % 32 == 0 + && x0.Uses == 1 + && x1.Uses == 1 + && r0.Uses == 1 + && r1.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && or.Uses == 1 + && sequentialAddresses(p0, p1, 2) + && mergePoint(b,x0,x1,y) != nil + && clobber(x0, x1, r0, r1, s0, s1, or) + => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y) + +// Combine 2 byte stores + shift into rolw 8 + word store +(MOVBstore [i] {s} p w + x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) + && x0.Uses == 1 + && clobber(x0) + => (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) +(MOVBstore [i] {s} p1 w + x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem)) + && x0.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && clobber(x0) + => (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem) + +// Combine stores + shifts into bswap and larger (unaligned) stores +(MOVBstore [i] {s} p w + x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) + x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) + x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && clobber(x0, x1, x2) + => (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) +(MOVBstore [i] {s} p3 w + x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) + x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) + x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem)))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && sequentialAddresses(p1, p2, 1) + && sequentialAddresses(p2, p3, 1) + && clobber(x0, x1, x2) + => (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem) + +(MOVBstore [i] {s} p w + x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) + x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) + x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) + x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) + x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) + x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) + x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && clobber(x0, x1, x2, x3, x4, x5, x6) + => (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) +(MOVBstore [i] {s} p7 w + x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) + x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) + x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) + x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) + x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) + x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) + x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem)))))))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && sequentialAddresses(p1, p2, 1) + && sequentialAddresses(p2, p3, 1) + && sequentialAddresses(p3, p4, 1) + && sequentialAddresses(p4, p5, 1) + && sequentialAddresses(p5, p6, 1) + && sequentialAddresses(p6, p7, 1) + && clobber(x0, x1, x2, x3, x4, x5, x6) + => (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem) + +// Combine constant stores into larger (unaligned) stores. 
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) + && x.Uses == 1 + && a.Off() + 1 == c.Off() + && clobber(x) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) +(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) + && x.Uses == 1 + && a.Off() + 1 == c.Off() + && clobber(x) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) +(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + && x.Uses == 1 + && a.Off() + 2 == c.Off() + && clobber(x) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) +(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) + && x.Uses == 1 + && a.Off() + 2 == c.Off() + && clobber(x) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) +(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) + && x.Uses == 1 + && a.Off() + 4 == c.Off() + && clobber(x) + => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) +(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) + && x.Uses == 1 + && a.Off() + 4 == c.Off() + && clobber(x) + => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) +(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [a] {s} p mem)) + && config.useSSE + && x.Uses == 1 + && a.Off() + 8 == c.Off() + && a.Val() == 0 + && c.Val() == 0 + && clobber(x) + => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem) +(MOVQstoreconst [a] {s} p x:(MOVQstoreconst [c] {s} p mem)) + && config.useSSE + && x.Uses == 1 + && a.Off() + 8 == c.Off() + && a.Val() == 0 + && c.Val() == 0 + && clobber(x) + => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem) + +// Combine stores into larger (unaligned) stores. Little endian. 
+(MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem)) + && x.Uses == 1 + && clobber(x) + => (MOVWstore [i-1] {s} p w mem) +(MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem)) + && x.Uses == 1 + && clobber(x) + => (MOVWstore [i] {s} p w mem) +(MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem)) + && x.Uses == 1 + && clobber(x) + => (MOVWstore [i-1] {s} p w0 mem) +(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) x:(MOVBstore [i] {s} p0 w mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && clobber(x) + => (MOVWstore [i] {s} p0 w mem) +(MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && clobber(x) + => (MOVWstore [i] {s} p0 w mem) +(MOVBstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVBstore [i] {s} p0 w0:(SHR(L|Q)const [j-8] w) mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 1) + && clobber(x) + => (MOVWstore [i] {s} p0 w0 mem) + +(MOVWstore [i] {s} p (SHR(L|Q)const [16] w) x:(MOVWstore [i-2] {s} p w mem)) + && x.Uses == 1 + && clobber(x) + => (MOVLstore [i-2] {s} p w mem) +(MOVWstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVWstore [i-2] {s} p w0:(SHR(L|Q)const [j-16] w) mem)) + && x.Uses == 1 + && clobber(x) + => (MOVLstore [i-2] {s} p w0 mem) +(MOVWstore [i] {s} p1 (SHR(L|Q)const [16] w) x:(MOVWstore [i] {s} p0 w mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 2) + && clobber(x) + => (MOVLstore [i] {s} p0 w mem) +(MOVWstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVWstore [i] {s} p0 w0:(SHR(L|Q)const [j-16] w) mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 2) + && clobber(x) + => (MOVLstore [i] {s} p0 w0 mem) + +(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) + && x.Uses == 1 + && clobber(x) + => (MOVQstore [i-4] {s} p w mem) +(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) + && x.Uses == 1 + && clobber(x) + => (MOVQstore [i-4] {s} p w0 mem) +(MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 4) + && clobber(x) + => (MOVQstore [i] {s} p0 w mem) +(MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem)) + && x.Uses == 1 + && sequentialAddresses(p0, p1, 4) + && clobber(x) + => (MOVQstore [i] {s} p0 w0 mem) + +(MOVBstore [7] {s} p1 (SHRQconst [56] w) + x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) + x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) + x3:(MOVBstore [0] {s} p1 w mem)))) + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && clobber(x1, x2, x3) + => (MOVQstore {s} p1 w mem) + +(MOVBstore [i] {s} p + x1:(MOVBload [j] {s2} p2 mem) + mem2:(MOVBstore [i-1] {s} p + x2:(MOVBload [j-1] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && clobber(x1, x2, mem2) + => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) + +(MOVWstore [i] {s} p + x1:(MOVWload [j] {s2} p2 mem) + mem2:(MOVWstore [i-2] {s} p + x2:(MOVWload [j-2] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && clobber(x1, x2, mem2) + => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) + +(MOVLstore [i] {s} p + x1:(MOVLload [j] {s2} p2 mem) + mem2:(MOVLstore [i-4] {s} p + x2:(MOVLload [j-4] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && clobber(x1, x2, mem2) + => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) + +// Merge load and op +// TODO: 
add indexed variants? +((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem) +((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) + +// Merge ADDQconst and LEAQ into atomic loads. +(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem) +(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) + +// Merge ADDQconst and LEAQ into atomic stores. +(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XCHGQ [off1+off2] {sym} val ptr mem) +(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB => + (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) +(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XCHGL [off1+off2] {sym} val ptr mem) +(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB => + (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + +// Merge ADDQconst into atomic adds. +// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions. +(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XADDQlock [off1+off2] {sym} val ptr mem) +(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XADDLlock [off1+off2] {sym} val ptr mem) + +// Merge ADDQconst into atomic compare and swaps. +// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions. +(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) => + (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) +(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) => + (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) + +// We don't need the conditional move if we know the arg of BSF is not zero. +(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x +// Extension is unnecessary for trailing zeros. 
+(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x)) +(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x)) + +// Redundant sign/zero extensions +// Note: see issue 21963. We have to make sure we use the right type on +// the resulting extension (the outer type, not the inner type). +(MOVLQSX (MOVLQSX x)) => (MOVLQSX x) +(MOVLQSX (MOVWQSX x)) => (MOVWQSX x) +(MOVLQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVWQSX (MOVWQSX x)) => (MOVWQSX x) +(MOVWQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVBQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVLQZX (MOVLQZX x)) => (MOVLQZX x) +(MOVLQZX (MOVWQZX x)) => (MOVWQZX x) +(MOVLQZX (MOVBQZX x)) => (MOVBQZX x) +(MOVWQZX (MOVWQZX x)) => (MOVWQZX x) +(MOVWQZX (MOVBQZX x)) => (MOVBQZX x) +(MOVBQZX (MOVBQZX x)) => (MOVBQZX x) + +(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) => + ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) +(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) => + ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + +// float <-> int register moves, with no conversion. +// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}. +(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val) +(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val) +(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val) +(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val) + +// Other load-like ops. +(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y)) +(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y)) +(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y)) +(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y)) +(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y)) +(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y)) +( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y)) +( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y)) +(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y)) +(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y)) + +(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y)) +(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y)) +(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y)) +(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y)) +(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y)) +(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y)) + +// Redirect stores to use the other register set. 
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem) +(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem) +(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem) +(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem) + +// Load args directly into the register class where it will be used. +// We do this by just modifying the type of the Arg. +(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym}) +(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym}) +(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym}) +(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym}) + +// LEAQ is rematerializeable, so this helps to avoid register spill. +// See issue 22947 for details +(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x) + +// HMULx is commutative, but its first argument must go in AX. +// If possible, put a rematerializeable value in the first argument slot, +// to reduce the odds that another value will have to be spilled +// specifically to free up AX. +(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x) +(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x) + +// Fold loads into compares +// Note: these may be undone by the flagalloc pass. +(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem) +(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem)) + +(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c]) + && l.Uses == 1 + && clobber(l) => +@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem) +(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c]) + && l.Uses == 1 + && clobber(l) => +@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem) + +(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) +(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) +(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) +(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + +(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2) + && l == l2 + && l.Uses == 2 + && clobber(l) => + @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem) + +// Convert ANDload to MOVload when we can do the AND in a containing TEST op. +// Only do when it's within the same block, so we don't have flags live across basic block boundaries. +// See issue 44228. 
+(TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x) + +(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) +(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) => + (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) + (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) + +// Arch-specific inlining for small or disjoint runtime.memmove +// Match post-lowering calls, memory version. +(SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) + && sc.Val64() >= 0 + && isSameCall(sym, "runtime.memmove") + && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 + && isInlinableMemmove(dst, src, sc.Val64(), config) + && clobber(s1, s2, s3, call) + => (Move [sc.Val64()] dst src mem) + +// Match post-lowering calls, register version. +(SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && call.Uses == 1 + && isInlinableMemmove(dst, src, sz, config) + && clobber(call) + => (Move [sz] dst src mem) + +// Prefetch instructions +(PrefetchCache ...) => (PrefetchT0 ...) +(PrefetchCacheStreamed ...) => (PrefetchNTA ...) + +// CPUID feature: BMI1. +(AND(Q|L) x (NOT(Q|L) y)) && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y) +(AND(Q|L) x (NEG(Q|L) x)) && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x) +(XOR(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x) +(AND(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSR(Q|L) x) + +(BSWAP(Q|L) (BSWAP(Q|L) p)) => p + +// CPUID feature: MOVBE. +(MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem) +(BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem)) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)load [i] {s} p mem) +(BSWAP(Q|L) (MOVBE(Q|L)load [i] {s} p m)) => (MOV(Q|L)load [i] {s} p m) +(MOVBE(Q|L)store [i] {s} p (BSWAP(Q|L) x) m) => (MOV(Q|L)store [i] {s} p x m) + +(ORQ x0:(MOVBELload [i0] {s} p mem) + sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem))) + && i0 == i1+4 + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem) + +(ORQ x0:(MOVBELload [i] {s} p0 mem) + sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && sh.Uses == 1 + && sequentialAddresses(p1, p0, 4) + && mergePoint(b,x0,x1) != nil + && clobber(x0, x1, sh) + => @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem) |
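To round off the section, here is a hypothetical Go snippet (names invented) showing source-level expressions that correspond to the BMI1 rules above; when built for GOAMD64=v3 or later they can be selected as single ANDN/BLSI/BLSMSK/BLSR instructions, though the exact codegen depends on the surrounding code:

    package main

    import "fmt"

    // bmi1 computes the four bit tricks that the BMI1 rules target:
    //   x &^ y      -> ANDN   (and-not)
    //   x & -x      -> BLSI   (isolate lowest set bit)
    //   x ^ (x - 1) -> BLSMSK (mask up to and including lowest set bit)
    //   x & (x - 1) -> BLSR   (clear lowest set bit)
    func bmi1(x, y uint64) (andn, blsi, blsmsk, blsr uint64) {
        andn = x &^ y
        blsi = x & -x
        blsmsk = x ^ (x - 1)
        blsr = x & (x - 1)
        return
    }

    func main() {
        fmt.Println(bmi1(0b10110000, 0b11110000))
    }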