cmd/compile: automatically handle commuting ops in rewrite rules
We have lots of rewrite rules that vary only in the fact that
we have 2 versions for the 2 different orderings of various
commuting ops. For example:

(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)

It can get unwieldy quickly, especially when there is more than
one commuting op in a rule.

Our existing "fix" for this problem is to have rules that
canonicalize the operations first. For example:

(Eq64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Eq64 (Const64 <t> [c]) x)

Subsequent rules can then assume if there is a constant arg to Eq64,
it will be the first one. This fix kinda works, but it is fragile and
only works when we remember to include the required extra rules.

The fundamental problem is that the rule matcher doesn't know
anything about commuting ops. This CL fixes that fact.

We already have information about which ops commute. (The register
allocator takes advantage of commutativity.) The rule generator now
automatically generates multiple rules for a single source rule when
there are commutative ops in the rule. We can now drop all of our
almost-duplicate source-level rules and the canonicalization rules.

I have some CLs in progress that will be a lot less verbose when
the rule generator handles commutativity for me.

I had to reorganize the load-combining rules a bit. The 8-way OR
rules generated 128 different reorderings, which was causing the
generator to put too much code in the rewrite*.go files (the big
ones were going from 25K lines to 132K lines). Instead I reorganized
the rules to combine pairs of loads at a time. The generated rule
files are now actually a bit (5%) smaller.

[Note to reviewers: check these carefully. Most of the other rule
changes are trivial.]

Make.bash times are ~unchanged. Compiler benchmarks are not
observably different. Probably because we don't spend much compiler
time in rule matching anyway.

I've also done a pass over all of our ops adding commutative markings
for ops which hadn't had them previously.

Fixes #18292

Change-Id: I999b1307272e91965b66754576019dedcbe7527a
Reviewed-on: https://go-review.googlesource.com/38666
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
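To make the mechanism concrete, here is a rough sketch of the expansion the
generator performs. This is my own toy illustration, not the actual
cmd/compile/internal/ssa/gen code; the rule and expand names are invented for
the example. A match on a commutative op is emitted once per argument
ordering, so a source rule containing n commutative ops expands into 2^n
generated variants.

package main

import "fmt"

// rule is a toy stand-in for a two-argument rewrite-rule match.
type rule struct {
	op   string
	args [2]string
}

// expand returns the match variants for r: one per argument ordering when
// r.op is marked commutative, otherwise just r itself.
func expand(r rule, commutative map[string]bool) []rule {
	variants := []rule{r}
	if commutative[r.op] {
		variants = append(variants, rule{r.op, [2]string{r.args[1], r.args[0]}})
	}
	return variants
}

func main() {
	comm := map[string]bool{"ADDL": true}
	src := rule{op: "ADDL", args: [2]string{"x", "(MOVLconst [c])"}}
	for _, v := range expand(src, comm) {
		fmt.Printf("(%s %s %s) -> (ADDLconst [c] x)\n", v.op, v.args[0], v.args[1])
	}
}

Running this prints both orderings of the ADDL rule quoted above, which is
exactly the pair of hand-written rules this change makes redundant.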
This commit is contained in:
parent
627798db4e
commit
041ecb697f
@@ -431,9 +431,7 @@
// fold constants into instructions
(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
(ADDLcarry x (MOVLconst [c])) -> (ADDLconstcarry [c] x)
(ADDLcarry (MOVLconst [c]) x) -> (ADDLconstcarry [c] x)
(ADCL x (MOVLconst [c]) f) -> (ADCLconst [c] x f)
(ADCL (MOVLconst [c]) x f) -> (ADCLconst [c] x f)
@@ -443,10 +441,8 @@
(SBBL x (MOVLconst [c]) f) -> (SBBLconst [c] x f)
(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)
(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
@@ -455,10 +451,8 @@
(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)
(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)
(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
@@ -479,26 +473,17 @@
// Rotate instructions
(ADDL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c ] x)
( ORL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c ] x)
(XORL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c ] x)
(ADDL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x)
( ORL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x)
(XORL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x)
(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
(ADDL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c])
( ORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c])
(XORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c])
(ADDL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c])
( ORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c])
(XORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c])
(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
(ADDL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c])
( ORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c])
(XORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c])
(ADDL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c])
( ORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c])
(XORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c])
(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
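For readers unfamiliar with the pattern the rotate rules above target, this is
the usual shift-pair idiom in Go source. The example is mine and only shows
the shape of the code; it is not taken from the CL.

package main

import "fmt"

// rol8 left-rotates a 32-bit value by 8. The x<<c | x>>(32-c) form with a
// constant shift amount is the shape the ROLLconst rules above recognize.
func rol8(x uint32) uint32 {
	return x<<8 | x>>(32-8)
}

func main() {
	fmt.Printf("%#x\n", rol8(0x12345678)) // 0x34567812
}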
@@ -559,9 +544,9 @@
(MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
(MULLconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
(MULLconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3)-> (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5)-> (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9)-> (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
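A quick check of the strength-reduction identity behind the LEAL forms above
(my own throwaway verification, not part of the change): LEAL2 computes
arg0 + 2*arg1, so when c-2 is a power of two, c*x can be rebuilt from one
shift plus one LEAL2.

package main

import "fmt"

func main() {
	const c = 34 // c-2 == 32 is a power of two, so log2(c-2) == 5
	for _, x := range []int32{1, 7, -13, 1 << 20} {
		lea2 := (x << 5) + 2*x // models (LEAL2 (SHLLconst [5] x) x)
		fmt.Println(c*x == lea2) // true for every x, including on overflow
	}
}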
// combine add/shift into LEAL
(ADDL x (SHLLconst [3] y)) -> (LEAL8 x y)
@@ -569,19 +554,16 @@
(ADDL x (SHLLconst [1] y)) -> (LEAL2 x y)
(ADDL x (ADDL y y)) -> (LEAL2 x y)
(ADDL x (ADDL x y)) -> (LEAL2 y x)
(ADDL x (ADDL y x)) -> (LEAL2 y x)
// combine ADDL/ADDLconst into LEAL1
(ADDLconst [c] (ADDL x y)) -> (LEAL1 [c] x y)
(ADDL (ADDLconst [c] x) y) -> (LEAL1 [c] x y)
(ADDL x (ADDLconst [c] y)) -> (LEAL1 [c] x y)
// fold ADDL into LEAL
(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
(ADDL (LEAL [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
// fold ADDLconst into LEALx
(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(c+d) -> (LEAL1 [c+d] {s} x y)
@@ -589,7 +571,6 @@
(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(c+d) -> (LEAL4 [c+d] {s} x y)
(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(c+d) -> (LEAL8 [c+d] {s} x y)
(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL1 [c+d] {s} x y)
(LEAL1 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAL1 [c+d] {s} x y)
(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL2 [c+d] {s} x y)
(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAL2 [c+2*d] {s} x y)
(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL4 [c+d] {s} x y)
@@ -599,12 +580,8 @@
// fold shifts into LEALx
(LEAL1 [c] {s} x (SHLLconst [1] y)) -> (LEAL2 [c] {s} x y)
(LEAL1 [c] {s} (SHLLconst [1] x) y) -> (LEAL2 [c] {s} y x)
(LEAL1 [c] {s} x (SHLLconst [2] y)) -> (LEAL4 [c] {s} x y)
(LEAL1 [c] {s} (SHLLconst [2] x) y) -> (LEAL4 [c] {s} y x)
(LEAL1 [c] {s} x (SHLLconst [3] y)) -> (LEAL8 [c] {s} x y)
(LEAL1 [c] {s} (SHLLconst [3] x) y) -> (LEAL8 [c] {s} y x)
(LEAL2 [c] {s} x (SHLLconst [1] y)) -> (LEAL4 [c] {s} x y)
(LEAL2 [c] {s} x (SHLLconst [2] y)) -> (LEAL8 [c] {s} x y)
(LEAL4 [c] {s} x (SHLLconst [1] y)) -> (LEAL8 [c] {s} x y)
@@ -888,8 +865,6 @@
// LEAL into LEAL1
(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
(LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL1 into LEAL
(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
@@ -1128,14 +1103,12 @@
(CMPWconst x [0]) -> (TESTW x x)
(CMPBconst x [0]) -> (TESTB x x)
// Move shifts to second argument of ORs. Helps load combining rules below.
(ORL x:(SHLLconst _) y) && y.Op != Op386SHLLconst -> (ORL y x)
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary.LittleEndian does it.
(ORL x0:(MOVBload [i] {s} p mem)
s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
(ORL x0:(MOVBload [i0] {s} p mem)
s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
&& i1 == i0+1
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
@@ -1143,12 +1116,14 @@
&& clobber(x0)
&& clobber(x1)
&& clobber(s0)
-> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
-> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
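The comment above says these rules are shaped after encoding/binary.LittleEndian.
For context, a small example of the kind of Go code whose OR-of-shifted-byte-loads
tree they are meant to collapse into a single wider load; the example is mine, and
whether the merge actually fires depends on the surrounding code.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	// LittleEndian.Uint32 is b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 under the
	// hood, the tree the four-byte rule above merges into one MOVLload.
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(b)) // 0x12345678
	// The two-byte case corresponds to the MOVWload rule.
	fmt.Printf("%#x\n", binary.LittleEndian.Uint16(b)) // 0x5678
}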
(ORL o0:(ORL
x0:(MOVWload [i] {s} p mem)
s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem)))
s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem)))
x0:(MOVWload [i0] {s} p mem)
s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))
s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
&& i2 == i0+2
&& i3 == i0+3
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
@@ -1162,10 +1137,11 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(o0)
-> @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem)
-> @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
(ORL x0:(MOVBloadidx1 [i] {s} p idx mem)
s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
(ORL x0:(MOVBloadidx1 [i0] {s} p idx mem)
s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
&& i1==i0+1
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
@@ -1173,12 +1149,14 @@
&& clobber(x0)
&& clobber(x1)
&& clobber(s0)
-> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
-> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
(ORL o0:(ORL
x0:(MOVWloadidx1 [i] {s} p idx mem)
s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem)))
s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem)))
x0:(MOVWloadidx1 [i0] {s} p idx mem)
s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))
s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)))
&& i2 == i0+2
&& i3 == i0+3
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
@@ -1192,7 +1170,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(o0)
-> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
-> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i0] {s} p idx mem)
// Combine constant stores into larger (unaligned) stores.
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
@@ -193,10 +193,10 @@ func init() {
{name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "MULLQU", argLength: 2, reg: gp21mul, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1]
{name: "MULLQU", argLength: 2, reg: gp21mul, commutative: true, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1]
{name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits
@@ -229,9 +229,9 @@ func init() {
{name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f32
{name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f64
{name: "TESTL", argLength: 2, reg: gp2flags, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTB", argLength: 2, reg: gp2flags, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
{name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
{name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
@@ -314,7 +314,7 @@ func init() {
{name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
{name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
{name: "LEAL1", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
{name: "LEAL1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
{name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
{name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
{name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
@@ -331,17 +331,17 @@ func init() {
{name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
// indexed loads/stores
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx1", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx1", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
// TODO: sign-extending indexed loads
{name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
{name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
// TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
// For storeconst ops, the AuxInt field encodes both
File diff suppressed because it is too large
@@ -202,10 +202,10 @@ func init() {
{name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULQU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
@@ -216,8 +216,8 @@ func init() {
{name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo)
{name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
{name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo)
{name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
{name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
{name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
@@ -251,43 +251,43 @@ func init() {
{name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
{name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
{name: "TESTQ", argLength: 2, reg: gp2flags, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTL", argLength: 2, reg: gp2flags, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTB", argLength: 2, reg: gp2flags, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0
{name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
{name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
{name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
{name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64
{name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
{name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63
{name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
{name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64
{name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
{name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63
{name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
// Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount!
{name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64
{name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63
{name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
{name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15
{name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7
{name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64
{name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63
{name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
{name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15
{name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7
{name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
{name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
{name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
{name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15
{name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7
{name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
{name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
{name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
{name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15
{name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7
{name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63
{name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
{name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
{name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
{name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63
{name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
{name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
{name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
{name: "ADDLmem", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "ADDQmem", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
@@ -374,7 +374,7 @@ func init() {
{name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
{name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
{name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
{name: "LEAQ1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
{name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
{name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
{name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
@@ -398,21 +398,21 @@ func init() {
{name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
// indexed loads/stores
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx1", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx1", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
{name: "MOVQloadidx1", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
{name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
// TODO: sign-extending indexed loads
{name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
{name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
{name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
{name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
// TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
// For storeconst ops, the AuxInt field encodes both
@@ -481,16 +481,13 @@
(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
// fold constant into arithmetic ops
(ADD (MOVWconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
(SUB (MOVWconst [c]) x) -> (RSBconst [c] x)
(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
(RSB (MOVWconst [c]) x) -> (SUBconst [c] x)
(RSB x (MOVWconst [c])) -> (RSBconst [c] x)
(ADDS (MOVWconst [c]) x) -> (ADDSconst [c] x)
(ADDS x (MOVWconst [c])) -> (ADDSconst [c] x)
(SUBS (MOVWconst [c]) x) -> (RSBSconst [c] x)
(SUBS x (MOVWconst [c])) -> (SUBSconst [c] x)
(ADC (MOVWconst [c]) x flags) -> (ADCconst [c] x flags)
@@ -498,11 +495,8 @@
(SBC (MOVWconst [c]) x flags) -> (RSCconst [c] x flags)
(SBC x (MOVWconst [c]) flags) -> (SBCconst [c] x flags)
(AND (MOVWconst [c]) x) -> (ANDconst [c] x)
(AND x (MOVWconst [c])) -> (ANDconst [c] x)
(OR (MOVWconst [c]) x) -> (ORconst [c] x)
(OR x (MOVWconst [c])) -> (ORconst [c] x)
(XOR (MOVWconst [c]) x) -> (XORconst [c] x)
(OR x (MOVWconst [c])) -> (ORconst [c] x)
(XOR x (MOVWconst [c])) -> (XORconst [c] x)
(BIC x (MOVWconst [c])) -> (BICconst [c] x)
@@ -562,17 +556,6 @@
(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MUL (MOVWconst [c]) x) && int32(c) == -1 -> (RSBconst [0] x)
(MUL (MOVWconst [0]) _) -> (MOVWconst [0])
(MUL (MOVWconst [1]) x) -> x
(MUL (MOVWconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
(MUL (MOVWconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
(MUL (MOVWconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
(MUL (MOVWconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL (MOVWconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL (MOVWconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
(MUL (MOVWconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULA x (MOVWconst [c]) a) && int32(c) == -1 -> (SUB a x)
(MULA _ (MOVWconst [0]) a) -> a
(MULA x (MOVWconst [1]) a) -> (ADD x a)
@@ -835,17 +818,11 @@
// absorb shifts into ops
(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c])
(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c])
(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c])
(ADD x (SLL y z)) -> (ADDshiftLLreg x y z)
(ADD (SLL y z) x) -> (ADDshiftLLreg x y z)
(ADD x (SRL y z)) -> (ADDshiftRLreg x y z)
(ADD (SRL y z) x) -> (ADDshiftRLreg x y z)
(ADD x (SRA y z)) -> (ADDshiftRAreg x y z)
(ADD (SRA y z) x) -> (ADDshiftRAreg x y z)
(ADC x (SLLconst [c] y) flags) -> (ADCshiftLL x y [c] flags)
(ADC (SLLconst [c] y) x flags) -> (ADCshiftLL x y [c] flags)
(ADC x (SRLconst [c] y) flags) -> (ADCshiftRL x y [c] flags)
@@ -859,17 +836,11 @@
(ADC x (SRA y z) flags) -> (ADCshiftRAreg x y z flags)
(ADC (SRA y z) x flags) -> (ADCshiftRAreg x y z flags)
(ADDS x (SLLconst [c] y)) -> (ADDSshiftLL x y [c])
(ADDS (SLLconst [c] y) x) -> (ADDSshiftLL x y [c])
(ADDS x (SRLconst [c] y)) -> (ADDSshiftRL x y [c])
(ADDS (SRLconst [c] y) x) -> (ADDSshiftRL x y [c])
(ADDS x (SRAconst [c] y)) -> (ADDSshiftRA x y [c])
(ADDS (SRAconst [c] y) x) -> (ADDSshiftRA x y [c])
(ADDS x (SLL y z)) -> (ADDSshiftLLreg x y z)
(ADDS (SLL y z) x) -> (ADDSshiftLLreg x y z)
(ADDS x (SRL y z)) -> (ADDSshiftRLreg x y z)
(ADDS (SRL y z) x) -> (ADDSshiftRLreg x y z)
(ADDS x (SRA y z)) -> (ADDSshiftRAreg x y z)
(ADDS (SRA y z) x) -> (ADDSshiftRAreg x y z)
(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
(SUB (SLLconst [c] y) x) -> (RSBshiftLL x y [c])
(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
@@ -919,43 +890,24 @@
(RSB x (SRA y z)) -> (RSBshiftRAreg x y z)
(RSB (SRA y z) x) -> (SUBshiftRAreg x y z)
(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c])
(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c])
(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c])
(AND x (SLL y z)) -> (ANDshiftLLreg x y z)
(AND (SLL y z) x) -> (ANDshiftLLreg x y z)
(AND x (SRL y z)) -> (ANDshiftRLreg x y z)
(AND (SRL y z) x) -> (ANDshiftRLreg x y z)
(AND x (SRA y z)) -> (ANDshiftRAreg x y z)
(AND (SRA y z) x) -> (ANDshiftRAreg x y z)
(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
(OR x (SRLconst [c] y)) -> (ORshiftRL x y [c])
(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c])
(OR x (SRAconst [c] y)) -> (ORshiftRA x y [c])
(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c])
(OR x (SLL y z)) -> (ORshiftLLreg x y z)
(OR (SLL y z) x) -> (ORshiftLLreg x y z)
(OR x (SRL y z)) -> (ORshiftRLreg x y z)
(OR (SRL y z) x) -> (ORshiftRLreg x y z)
(OR x (SRA y z)) -> (ORshiftRAreg x y z)
(OR (SRA y z) x) -> (ORshiftRAreg x y z)
(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c])
(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c])
(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c])
(XOR x (SRRconst [c] y)) -> (XORshiftRR x y [c])
(XOR (SRRconst [c] y) x) -> (XORshiftRR x y [c])
(XOR x (SLL y z)) -> (XORshiftLLreg x y z)
(XOR (SLL y z) x) -> (XORshiftLLreg x y z)
(XOR x (SRL y z)) -> (XORshiftRLreg x y z)
(XOR (SRL y z) x) -> (XORshiftRLreg x y z)
(XOR x (SRA y z)) -> (XORshiftRAreg x y z)
(XOR (SRA y z) x) -> (XORshiftRAreg x y z)
(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
@@ -1207,7 +1159,6 @@
// generic simplifications
(ADD x (RSBconst [0] y)) -> (SUB x y)
(ADD (RSBconst [0] y) x) -> (SUB x y)
(ADD <t> (RSBconst [c] x) (RSBconst [d] y)) -> (RSBconst [c+d] (ADD <t> x y))
(SUB x x) -> (MOVWconst [0])
(RSB x x) -> (MOVWconst [0])
@@ -1217,10 +1168,8 @@
(BIC x x) -> (MOVWconst [0])
(ADD (MUL x y) a) -> (MULA x y a)
(ADD a (MUL x y)) -> (MULA x y a)
(AND x (MVN y)) -> (BIC x y)
(AND (MVN y) x) -> (BIC x y)
// simplification with *shift ops
(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
@@ -1242,11 +1191,8 @@
(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
(AND x (MVNshiftLL y [c])) -> (BICshiftLL x y [c])
(AND (MVNshiftLL y [c]) x) -> (BICshiftLL x y [c])
(AND x (MVNshiftRL y [c])) -> (BICshiftRL x y [c])
(AND (MVNshiftRL y [c]) x) -> (BICshiftRL x y [c])
(AND x (MVNshiftRA y [c])) -> (BICshiftRA x y [c])
(AND (MVNshiftRA y [c]) x) -> (BICshiftRA x y [c])
// floating point optimizations
(CMPF x (MOVFconst [0])) -> (CMPF0 x)
@@ -752,14 +752,10 @@
(MOVDreg x) && x.Uses == 1 -> (MOVDnop x)
// fold constant into arithmetic ops
(ADD (MOVDconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVDconst [c])) -> (ADDconst [c] x)
(SUB x (MOVDconst [c])) -> (SUBconst [c] x)
(AND (MOVDconst [c]) x) -> (ANDconst [c] x)
(AND x (MOVDconst [c])) -> (ANDconst [c] x)
(OR (MOVDconst [c]) x) -> (ORconst [c] x)
(OR x (MOVDconst [c])) -> (ORconst [c] x)
(XOR (MOVDconst [c]) x) -> (XORconst [c] x)
(XOR x (MOVDconst [c])) -> (XORconst [c] x)
(BIC x (MOVDconst [c])) -> (BICconst [c] x)
@@ -784,18 +780,6 @@
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MUL (MOVDconst [-1]) x) -> (NEG x)
(MUL (MOVDconst [0]) _) -> (MOVDconst [0])
(MUL (MOVDconst [1]) x) -> x
(MUL (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
(MUL (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)])
(MUL (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MUL (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x)
(MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 -> x
@@ -807,17 +791,6 @@
(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULW (MOVDconst [c]) x) && int32(c)==-1 -> (NEG x)
(MULW (MOVDconst [c]) _) && int32(c)==0 -> (MOVDconst [0])
(MULW (MOVDconst [c]) x) && int32(c)==1 -> x
(MULW (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
(MULW (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
(MULW (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MULW (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MULW (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MULW (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
// div by constant
(UDIV x (MOVDconst [1])) -> x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
@@ -830,7 +803,6 @@
// generic simplifications
(ADD x (NEG y)) -> (SUB x y)
(ADD (NEG y) x) -> (SUB x y)
(SUB x x) -> (MOVDconst [0])
(AND x x) -> x
(OR x x) -> x
@@ -1080,34 +1052,20 @@
// absorb shifts into ops
(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c])
(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c])
(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c])
(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
(SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c])
(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c])
(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c])
(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c])
(OR x s:(SLLconst [c] y)) && s.Uses == 1 && clobber(s) -> (ORshiftLL x y [c]) // useful for combined load
(OR s:(SLLconst [c] y) x) && s.Uses == 1 && clobber(s) -> (ORshiftLL x y [c])
(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c]) // useful for combined load
(OR x (SRLconst [c] y)) -> (ORshiftRL x y [c])
(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c])
(OR x (SRAconst [c] y)) -> (ORshiftRA x y [c])
(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c])
(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c])
(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c])
(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c])
(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
@@ -1194,20 +1152,23 @@
// little endian loads
// b[0] | b[1]<<8 -> load 16-bit
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem)))
y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
&& i1 == i0+1
&& x0.Uses == 1 && x1.Uses == 1
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0) && clobber(x1)
&& clobber(y0) && clobber(y1)
-> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i] p) mem)
-> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
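The ARM64 rules above and below target the same byte-assembly shape as the 386
rules earlier. For reference, a hand-written 64-bit version of that shape; this
is my own illustration, get64le is an invented name, and whether the eight byte
loads are actually merged depends on these rules firing.

package main

import "fmt"

// get64le assembles a 64-bit little-endian value from 8 explicit byte loads,
// the source-level shape the 8-byte ORshiftLL rules look for.
func get64le(b []byte) uint64 {
	_ = b[7] // single bounds check up front
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func main() {
	fmt.Printf("%#x\n", get64le([]byte{1, 2, 3, 4, 5, 6, 7, 8})) // 0x807060504030201
}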
|
||||
|
||||
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit
|
||||
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
|
||||
x0:(MOVHUload [i] {s} p mem)
|
||||
y1:(MOVDnop x1:(MOVBUload [i+2] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i+3] {s} p mem)))
|
||||
x0:(MOVHUload [i0] {s} p mem)
|
||||
y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
|
||||
&& i2 == i0+2
|
||||
&& i3 == i0+3
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
|
||||
&& y1.Uses == 1 && y2.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
|
|
@ -1215,15 +1176,19 @@
|
|||
&& clobber(x0) && clobber(x1) && clobber(x2)
|
||||
&& clobber(y1) && clobber(y2)
|
||||
&& clobber(o0)
|
||||
-> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i] p) mem)
|
||||
-> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
|
||||
|
||||
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit
|
||||
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
|
||||
x0:(MOVWUload [i] {s} p mem)
|
||||
y1:(MOVDnop x1:(MOVBUload [i+4] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i+5] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i+6] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i+7] {s} p mem)))
|
||||
x0:(MOVWUload [i0] {s} p mem)
|
||||
y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
|
||||
&& i4 == i0+4
|
||||
&& i5 == i0+5
|
||||
&& i6 == i0+6
|
||||
&& i7 == i0+7
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
|
||||
&& y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
|
||||
&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
|
||||
|
|
@ -1231,14 +1196,17 @@
|
|||
&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
|
||||
&& clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4)
|
||||
&& clobber(o0) && clobber(o1) && clobber(o2)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i] p) mem)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
|
||||
|
||||
// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 32-bit
|
||||
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
|
||||
y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem)))
|
||||
y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& i2 == i0+2
|
||||
&& i3 == i0+3
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
|
||||
&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
|
||||
&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
|
||||
|
|
@ -1246,18 +1214,25 @@
|
|||
&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
|
||||
&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
|
||||
&& clobber(o0) && clobber(o1) && clobber(s0)
|
||||
-> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i-3] p) mem)
|
||||
-> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
|
||||
|
||||
// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 64-bit, reverse
|
||||
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
|
||||
y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem)))
|
||||
y5:(MOVDnop x5:(MOVBUload [i-5] {s} p mem)))
|
||||
y6:(MOVDnop x6:(MOVBUload [i-6] {s} p mem)))
|
||||
y7:(MOVDnop x7:(MOVBUload [i-7] {s} p mem)))
|
||||
y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem)))
|
||||
y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem)))
|
||||
y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))
|
||||
y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& i2 == i0+2
|
||||
&& i3 == i0+3
|
||||
&& i4 == i0+4
|
||||
&& i5 == i0+5
|
||||
&& i6 == i0+6
|
||||
&& i7 == i0+7
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
|
||||
&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
|
||||
&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
|
||||
|
|
@ -1271,26 +1246,29 @@
|
|||
&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7)
|
||||
&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3)
|
||||
&& clobber(o4) && clobber(o5) && clobber(s0)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i-7] p) mem))
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
|
||||
|
||||
// big endian loads
// b[1] | b[0]<<8 -> load 16-bit, reverse
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem)))
&& ((i-1)%2 == 0 || i-1<256 && i-1>-256 && !isArg(s) && !isAuto(s))
y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
&& i1 == i0+1
&& (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s))
&& x0.Uses == 1 && x1.Uses == 1
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0) && clobber(x1)
&& clobber(y0) && clobber(y1)
-> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i-1] {s} p mem))
-> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
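In the big-endian pattern the byte at the higher address ends up in the low bits, so the rewrite issues a plain 16-bit load and byte-swaps it with REV16W. The same relationship in Go, for illustration only:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	b := []byte{0x12, 0x34}
	merged := uint16(b[1]) | uint16(b[0])<<8 // shape matched by the rule
	swapped := bits.ReverseBytes16(binary.LittleEndian.Uint16(b))
	fmt.Println(merged == binary.BigEndian.Uint16(b)) // true
	fmt.Println(merged == swapped)                    // true: load, then byte-swap
}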
// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit, reverse
|
||||
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
|
||||
y0:(REV16W x0:(MOVHUload [i] {s} p mem))
|
||||
y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem)))
|
||||
y0:(REV16W x0:(MOVHUload [i2] {s} p mem))
|
||||
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& i2 == i0+2
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
|
||||
&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
|
|
@ -1298,15 +1276,19 @@
|
|||
&& clobber(x0) && clobber(x1) && clobber(x2)
|
||||
&& clobber(y0) && clobber(y1) && clobber(y2)
|
||||
&& clobber(o0)
|
||||
-> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i-2] p) mem))
|
||||
-> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
|
||||
|
||||
// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit, reverse
|
||||
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
|
||||
y0:(REVW x0:(MOVWUload [i] {s} p mem))
|
||||
y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem)))
|
||||
y0:(REVW x0:(MOVWUload [i4] {s} p mem))
|
||||
y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& i2 == i0+2
|
||||
&& i3 == i0+3
|
||||
&& i4 == i0+4
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
|
||||
&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
|
||||
&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
|
||||
|
|
@ -1314,14 +1296,17 @@
|
|||
&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
|
||||
&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4)
|
||||
&& clobber(o0) && clobber(o1) && clobber(o2)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i-4] p) mem))
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
|
||||
|
||||
// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit, reverse
|
||||
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
|
||||
y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem)))
|
||||
y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& i2 == i0+2
|
||||
&& i3 == i0+3
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
|
||||
&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
|
||||
&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
|
||||
|
|
@ -1329,18 +1314,25 @@
|
|||
&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
|
||||
&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
|
||||
&& clobber(o0) && clobber(o1) && clobber(s0)
|
||||
-> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i] p) mem))
|
||||
-> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
|
||||
|
||||
// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit, reverse
|
||||
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
|
||||
y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i+4] {s} p mem)))
|
||||
y5:(MOVDnop x5:(MOVBUload [i+5] {s} p mem)))
|
||||
y6:(MOVDnop x6:(MOVBUload [i+6] {s} p mem)))
|
||||
y7:(MOVDnop x7:(MOVBUload [i+7] {s} p mem)))
|
||||
y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
|
||||
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
|
||||
y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
|
||||
y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
|
||||
y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem)))
|
||||
y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem)))
|
||||
y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))
|
||||
y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& i2 == i0+2
|
||||
&& i3 == i0+3
|
||||
&& i4 == i0+4
|
||||
&& i5 == i0+5
|
||||
&& i6 == i0+6
|
||||
&& i7 == i0+7
|
||||
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
|
||||
&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
|
||||
&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
|
||||
|
|
@ -1354,4 +1346,4 @@
|
|||
&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7)
|
||||
&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3)
|
||||
&& clobber(o4) && clobber(o5) && clobber(s0)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i] p) mem))
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
|
||||
|
|
|
|||
|
|
@ -591,16 +591,11 @@
(MOVWreg x) && x.Uses == 1 -> (MOVWnop x)
// fold constant into arithmetic ops
(ADD (MOVWconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
(AND (MOVWconst [c]) x) -> (ANDconst [c] x)
(AND x (MOVWconst [c])) -> (ANDconst [c] x)
(OR (MOVWconst [c]) x) -> (ORconst [c] x)
(OR x (MOVWconst [c])) -> (ORconst [c] x)
(XOR (MOVWconst [c]) x) -> (XORconst [c] x)
(XOR x (MOVWconst [c])) -> (XORconst [c] x)
(NOR (MOVWconst [c]) x) -> (NORconst [c] x)
(NOR x (MOVWconst [c])) -> (NORconst [c] x)
(SLL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
@ -635,7 +630,6 @@
// generic simplifications
(ADD x (NEG y)) -> (SUB x y)
(ADD (NEG y) x) -> (SUB x y)
(SUB x x) -> (MOVWconst [0])
(SUB (MOVWconst [0]) x) -> (NEG x)
(AND x x) -> x
@ -729,12 +723,12 @@
// conditional move
(CMOVZ _ b (MOVWconst [0])) -> b
(CMOVZ a _ (MOVWconst [c])) && c!=0-> a
(CMOVZ a _ (MOVWconst [c])) && c!=0 -> a
(CMOVZzero _ (MOVWconst [0])) -> (MOVWconst [0])
(CMOVZzero a (MOVWconst [c])) && c!=0-> a
(CMOVZzero a (MOVWconst [c])) && c!=0 -> a
(CMOVZ a (MOVWconst [0]) c) -> (CMOVZzero a c)
// atomic
(LoweredAtomicStore ptr (MOVWconst [0]) mem) -> (LoweredAtomicStorezero ptr mem)
(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c)-> (LoweredAtomicAddconst [c] ptr mem)
(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c) -> (LoweredAtomicAddconst [c] ptr mem)
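As the constant folds above imply, CMOVZ a b c yields b when the control value c is zero and a otherwise. A plain-Go rendering of that selection, for reference only:

package main

import "fmt"

// cmovz mirrors the semantics implied by the CMOVZ folds above:
// return b when c is zero, a otherwise.
func cmovz(a, b, c int32) int32 {
	if c == 0 {
		return b
	}
	return a
}

func main() {
	fmt.Println(cmovz(1, 2, 0), cmovz(1, 2, 7)) // 2 1
}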
@ -579,16 +579,11 @@
(MOVVreg x) && x.Uses == 1 -> (MOVVnop x)
// fold constant into arithmetic ops
(ADDV (MOVVconst [c]) x) && is32Bit(c) -> (ADDVconst [c] x)
(ADDV x (MOVVconst [c])) && is32Bit(c) -> (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) -> (SUBVconst [c] x)
(AND (MOVVconst [c]) x) && is32Bit(c) -> (ANDconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) -> (ANDconst [c] x)
(OR (MOVVconst [c]) x) && is32Bit(c) -> (ORconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) -> (ORconst [c] x)
(XOR (MOVVconst [c]) x) && is32Bit(c) -> (XORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) -> (XORconst [c] x)
(NOR (MOVVconst [c]) x) && is32Bit(c) -> (NORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) -> (NORconst [c] x)
(SLLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0])
@ -620,7 +615,6 @@
// generic simplifications
(ADDV x (NEGV y)) -> (SUBV x y)
(ADDV (NEGV y) x) -> (SUBV x y)
(SUBV x x) -> (MOVVconst [0])
(SUBV (MOVVconst [0]) x) -> (NEGV x)
(AND x x) -> x
@ -577,7 +577,7 @@
(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
(MOVWstore [4] dst (MOVWZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem))
(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0->
(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
(MOVHstore [6] dst (MOVHZload [6] src mem)
(MOVHstore [4] dst (MOVHZload [4] src mem)
(MOVHstore [2] dst (MOVHZload [2] src mem)
@ -624,9 +624,6 @@
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
(AND (MOVDconst [c]) x) && isU16Bit(c) -> (ANDconst [c] x)
(XOR (MOVDconst [c]) x) && isU32Bit(c) -> (XORconst [c] x)
(OR (MOVDconst [c]) x) && isU32Bit(c) -> (ORconst [c] x)
// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
@ -692,7 +689,6 @@
// Arithmetic constant ops
(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
@ -864,9 +860,7 @@
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)
// floating-point fused multiply-add/sub
(FADD z (FMUL x y)) -> (FMADD x y z)
(FADD (FMUL x y) z) -> (FMADD x y z)
(FSUB (FMUL x y) z) -> (FMSUB x y z)
(FADDS z (FMULS x y)) -> (FMADDS x y z)
(FADDS (FMULS x y) z) -> (FMADDS x y z)
(FSUBS (FMULS x y) z) -> (FMSUBS x y z)
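These folds turn an add or subtract of a product into a single fused multiply-add, which rounds once instead of twice; math.FMA exposes the same operation in Go. For illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	x, y, z := 1.5, 2.25, 0.125
	fmt.Println(math.FMA(x, y, z)) // fused: x*y + z with a single rounding
	fmt.Println(x*y + z)           // separate multiply and add
}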
@ -224,7 +224,7 @@ func init() {
{name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
{name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR"}, // ^(arg0|arg1)
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1)
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
{name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
@ -514,9 +514,7 @@
// Fold constants into instructions.
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
(ADDW x (MOVDconst [c])) -> (ADDWconst [c] x)
(ADDW (MOVDconst [c]) x) -> (ADDWconst [c] x)
(SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c])
(SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst <v.Type> x [c]))
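The is32Bit guard restricts the fold to constants that fit in a signed 32-bit immediate. A minimal equivalent of that check, under the assumption that this is all the helper does (fitsInt32 is an illustrative name, not the compiler's):

package main

import "fmt"

// fitsInt32 reports whether c survives a round trip through int32,
// i.e. whether it can be encoded as a signed 32-bit immediate.
func fitsInt32(c int64) bool {
	return c == int64(int32(c))
}

func main() {
	fmt.Println(fitsInt32(1<<20), fitsInt32(1<<40)) // true false
}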
@ -524,31 +522,23 @@
|
|||
(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
|
||||
|
||||
(MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x)
|
||||
(MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x)
|
||||
(MULLW x (MOVDconst [c])) -> (MULLWconst [c] x)
|
||||
(MULLW (MOVDconst [c]) x) -> (MULLWconst [c] x)
|
||||
|
||||
// NILF instructions leave the high 32 bits unchanged which is
|
||||
// equivalent to the leftmost 32 bits being set.
|
||||
// TODO(mundaym): modify the assembler to accept 64-bit values
|
||||
// and use isU32Bit(^c).
|
||||
(AND x (MOVDconst [c])) && is32Bit(c) && c < 0 -> (ANDconst [c] x)
|
||||
(AND (MOVDconst [c]) x) && is32Bit(c) && c < 0 -> (ANDconst [c] x)
|
||||
(ANDW x (MOVDconst [c])) -> (ANDWconst [c] x)
|
||||
(ANDW (MOVDconst [c]) x) -> (ANDWconst [c] x)
|
||||
|
||||
(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
|
||||
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x)
|
||||
|
||||
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
|
||||
(OR (MOVDconst [c]) x) && isU32Bit(c) -> (ORconst [c] x)
|
||||
(ORW x (MOVDconst [c])) -> (ORWconst [c] x)
|
||||
(ORW (MOVDconst [c]) x) -> (ORWconst [c] x)
|
||||
|
||||
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
|
||||
(XOR (MOVDconst [c]) x) && isU32Bit(c) -> (XORconst [c] x)
|
||||
(XORW x (MOVDconst [c])) -> (XORWconst [c] x)
|
||||
(XORW (MOVDconst [c]) x) -> (XORWconst [c] x)
|
||||
|
||||
(SLD x (MOVDconst [c])) -> (SLDconst [c&63] x)
|
||||
(SLW x (MOVDconst [c])) -> (SLWconst [c&63] x)
|
||||
|
|
@ -565,19 +555,13 @@
(SRD x (ANDconst [63] y)) -> (SRD x y)
// Rotate generation
(ADD (SLDconst x [c]) (SRDconst x [64-c])) -> (RLLGconst [ c] x)
( OR (SLDconst x [c]) (SRDconst x [64-c])) -> (RLLGconst [ c] x)
(XOR (SLDconst x [c]) (SRDconst x [64-c])) -> (RLLGconst [ c] x)
(ADD (SRDconst x [c]) (SLDconst x [64-c])) -> (RLLGconst [64-c] x)
( OR (SRDconst x [c]) (SLDconst x [64-c])) -> (RLLGconst [64-c] x)
(XOR (SRDconst x [c]) (SLDconst x [64-c])) -> (RLLGconst [64-c] x)
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (RLLGconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (RLLGconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (RLLGconst [c] x)
(ADDW (SLWconst x [c]) (SRWconst x [32-c])) -> (RLLconst [ c] x)
( ORW (SLWconst x [c]) (SRWconst x [32-c])) -> (RLLconst [ c] x)
(XORW (SLWconst x [c]) (SRWconst x [32-c])) -> (RLLconst [ c] x)
(ADDW (SRWconst x [c]) (SLWconst x [32-c])) -> (RLLconst [32-c] x)
( ORW (SRWconst x [c]) (SLWconst x [32-c])) -> (RLLconst [32-c] x)
(XORW (SRWconst x [c]) (SLWconst x [32-c])) -> (RLLconst [32-c] x)
(ADDW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (RLLconst [c] x)
( ORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (RLLconst [c] x)
(XORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (RLLconst [c] x)
(CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c]))
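The rotate rules now bind the second shift amount to its own variable d and require d == 64-c (or 32-c) in a condition, rather than writing the amount as 64-c inside the pattern. The identity being recognized, in Go:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x, c := uint64(0x0123456789abcdef), 13
	rotated := x<<c | x>>(64-c)                     // the matched shape
	fmt.Println(rotated == bits.RotateLeft64(x, c)) // true
}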
@ -589,11 +573,8 @@
|
|||
(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))]))
|
||||
|
||||
// Using MOV{W,H,B}Zreg instead of AND is cheaper.
|
||||
(AND (MOVDconst [0xFF]) x) -> (MOVBZreg x)
|
||||
(AND x (MOVDconst [0xFF])) -> (MOVBZreg x)
|
||||
(AND (MOVDconst [0xFFFF]) x) -> (MOVHZreg x)
|
||||
(AND x (MOVDconst [0xFFFF])) -> (MOVHZreg x)
|
||||
(AND (MOVDconst [0xFFFFFFFF]) x) -> (MOVWZreg x)
|
||||
(AND x (MOVDconst [0xFFFFFFFF])) -> (MOVWZreg x)
|
||||
(ANDWconst [0xFF] x) -> (MOVBZreg x)
|
||||
(ANDWconst [0xFFFF] x) -> (MOVHZreg x)
|
||||
|
|
@ -617,7 +598,6 @@
|
|||
(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
|
||||
(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(c+d) -> (MOVDaddr [c+d] {s} x)
|
||||
(ADD x (MOVDaddr [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
|
||||
(ADD (MOVDaddr [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
|
||||
|
||||
// fold ADDconst into MOVDaddrx
|
||||
(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(c+d) -> (MOVDaddridx [c+d] {s} x y)
|
||||
|
|
@ -1027,8 +1007,6 @@
|
|||
(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) -> (ADDconst [-c] x)
|
||||
|
||||
// fused multiply-add
|
||||
(FADD x (FMUL y z)) -> (FMADD x y z)
|
||||
(FADDS x (FMULS y z)) -> (FMADDS x y z)
|
||||
(FADD (FMUL y z) x) -> (FMADD x y z)
|
||||
(FADDS (FMULS y z) x) -> (FMADDS x y z)
|
||||
(FSUB (FMUL y z) x) -> (FMSUB x y z)
|
||||
|
|
@ -1311,369 +1289,549 @@
&& clobber(x)
-> (MOVDBRstoreidx [i-4] {s} p idx w0 mem)
// Move shifts to second argument of ORs. Helps load combining rules below.
(ORW x:(SLWconst _) y) && y.Op != OpS390XSLWconst -> (ORW y x)
(OR x:(SLDconst _) y) && y.Op != OpS390XSLDconst -> (OR y x)
// Combining byte loads into larger (unaligned) loads.
// Little endian loads.
// Big-endian loads
// b[0] | b[1]<<8 -> load 16-bit, reverse bytes
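The two ORW/OR rules above only normalize operand order: any shifted operand is pushed into the second slot so the pairwise combining patterns that follow, which expect the (OR x sh:(SLDconst ...)) shape, have a single form to match. A hypothetical sketch of that normalization (not the generated matcher):

package main

import "fmt"

// putShiftSecond swaps a two-operand OR so that a shifted operand,
// if there is exactly one, lands in the second position, mirroring
// the two normalization rules above.
func putShiftSecond(a, b string, aIsShift, bIsShift bool) (string, string) {
	if aIsShift && !bIsShift {
		return b, a
	}
	return a, b
}

func main() {
	x, y := putShiftSecond("(SLDconst [8] x1)", "x0", true, false)
	fmt.Println(x, y) // x0 (SLDconst [8] x1)
}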
(ORW x0:(MOVBZload [i] {s} p mem)
|
||||
s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
|
||||
(ORW x1:(MOVBZload [i1] {s} p mem)
|
||||
sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
|
||||
|
||||
(OR x1:(MOVBZload [i1] {s} p mem)
|
||||
sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
|
||||
|
||||
(ORW x1:(MOVHZload [i1] {s} p mem)
|
||||
sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)))
|
||||
&& i1 == i0+2
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
|
||||
|
||||
(OR x1:(MOVHZload [i1] {s} p mem)
|
||||
sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)))
|
||||
&& i1 == i0+2
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
|
||||
|
||||
(OR x1:(MOVWZload [i1] {s} p mem)
|
||||
sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)))
|
||||
&& i1 == i0+4
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem)
|
||||
|
||||
(ORW
|
||||
s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
|
||||
or:(ORW
|
||||
s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0-8
|
||||
&& j1 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem))
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
|
||||
|
||||
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
|
||||
(ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRload [i] {s} p mem))
|
||||
s0:(SLWconst [16] x1:(MOVBZload [i+2] {s} p mem)))
|
||||
s1:(SLWconst [24] x2:(MOVBZload [i+3] {s} p mem)))
|
||||
&& p.Op != OpSB
|
||||
&& z0.Uses == 1
|
||||
(OR
|
||||
s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
|
||||
or:(OR
|
||||
s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0-8
|
||||
&& j1 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2) != nil
|
||||
&& clobber(z0)
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(o0)
|
||||
-> @mergePoint(b,x0,x1,x2) (MOVWBRload [i] {s} p mem)
|
||||
|
||||
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
|
||||
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
|
||||
x0:(MOVBZload [i] {s} p mem)
|
||||
s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem)))
|
||||
s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem)))
|
||||
s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem)))
|
||||
s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem)))
|
||||
s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem)))
|
||||
s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem)))
|
||||
s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem)))
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& x3.Uses == 1
|
||||
&& x4.Uses == 1
|
||||
&& x5.Uses == 1
|
||||
&& x6.Uses == 1
|
||||
&& x7.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& s2.Uses == 1
|
||||
&& s3.Uses == 1
|
||||
&& s4.Uses == 1
|
||||
&& s5.Uses == 1
|
||||
&& s6.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& o1.Uses == 1
|
||||
&& o2.Uses == 1
|
||||
&& o3.Uses == 1
|
||||
&& o4.Uses == 1
|
||||
&& o5.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(x3)
|
||||
&& clobber(x4)
|
||||
&& clobber(x5)
|
||||
&& clobber(x6)
|
||||
&& clobber(x7)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(s2)
|
||||
&& clobber(s3)
|
||||
&& clobber(s4)
|
||||
&& clobber(s5)
|
||||
&& clobber(s6)
|
||||
&& clobber(o0)
|
||||
&& clobber(o1)
|
||||
&& clobber(o2)
|
||||
&& clobber(o3)
|
||||
&& clobber(o4)
|
||||
&& clobber(o5)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem)
|
||||
|
||||
// b[0] | b[1]<<8 -> load 16-bit, reverse bytes
|
||||
(ORW x0:(MOVBZloadidx [i] {s} p idx mem)
|
||||
s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx <v.Type> [i] {s} p idx mem))
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
|
||||
|
||||
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
|
||||
(ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRloadidx [i] {s} p idx mem))
|
||||
s0:(SLWconst [16] x1:(MOVBZloadidx [i+2] {s} p idx mem)))
|
||||
s1:(SLWconst [24] x2:(MOVBZloadidx [i+3] {s} p idx mem)))
|
||||
&& z0.Uses == 1
|
||||
(OR
|
||||
s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))
|
||||
or:(OR
|
||||
s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))
|
||||
y))
|
||||
&& i1 == i0+2
|
||||
&& j1 == j0-16
|
||||
&& j1 % 32 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2) != nil
|
||||
&& clobber(z0)
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(o0)
|
||||
-> @mergePoint(b,x0,x1,x2) (MOVWZreg (MOVWBRloadidx <v.Type> [i] {s} p idx mem))
|
||||
|
||||
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
|
||||
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
|
||||
x0:(MOVBZloadidx [i] {s} p idx mem)
|
||||
s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
|
||||
s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))
|
||||
s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
|
||||
s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem)))
|
||||
s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem)))
|
||||
s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem)))
|
||||
s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem)))
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& x3.Uses == 1
|
||||
&& x4.Uses == 1
|
||||
&& x5.Uses == 1
|
||||
&& x6.Uses == 1
|
||||
&& x7.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& s2.Uses == 1
|
||||
&& s3.Uses == 1
|
||||
&& s4.Uses == 1
|
||||
&& s5.Uses == 1
|
||||
&& s6.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& o1.Uses == 1
|
||||
&& o2.Uses == 1
|
||||
&& o3.Uses == 1
|
||||
&& o4.Uses == 1
|
||||
&& o5.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(x3)
|
||||
&& clobber(x4)
|
||||
&& clobber(x5)
|
||||
&& clobber(x6)
|
||||
&& clobber(x7)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(s2)
|
||||
&& clobber(s3)
|
||||
&& clobber(s4)
|
||||
&& clobber(s5)
|
||||
&& clobber(s6)
|
||||
&& clobber(o0)
|
||||
&& clobber(o1)
|
||||
&& clobber(o2)
|
||||
&& clobber(o3)
|
||||
&& clobber(o4)
|
||||
&& clobber(o5)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx <v.Type> [i] {s} p idx mem)
|
||||
|
||||
// Big endian loads.
|
||||
|
||||
// b[1] | b[0]<<8 -> load 16-bit
|
||||
(ORW x0:(MOVBZload [i] {s} p mem)
|
||||
s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZload [i0] {s} p mem)) y)
|
||||
|
||||
// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
|
||||
(ORW o0:(ORW x0:(MOVHZload [i] {s} p mem)
|
||||
s0:(SLWconst [16] x1:(MOVBZload [i-1] {s} p mem)))
|
||||
s1:(SLWconst [24] x2:(MOVBZload [i-2] {s} p mem)))
|
||||
// Big-endian indexed loads
|
||||
|
||||
(ORW x1:(MOVBZloadidx [i1] {s} p idx mem)
|
||||
sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)))
|
||||
&& i1 == i0+1
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2) != nil
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(o0)
|
||||
-> @mergePoint(b,x0,x1,x2) (MOVWZload [i-2] {s} p mem)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem)
|
||||
|
||||
// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
|
||||
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
|
||||
x0:(MOVBZload [i] {s} p mem)
|
||||
s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem)))
|
||||
s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem)))
|
||||
s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem)))
|
||||
s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem)))
|
||||
s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem)))
|
||||
s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem)))
|
||||
s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem)))
|
||||
(OR x1:(MOVBZloadidx [i1] {s} p idx mem)
|
||||
sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)))
|
||||
&& i1 == i0+1
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& x3.Uses == 1
|
||||
&& x4.Uses == 1
|
||||
&& x5.Uses == 1
|
||||
&& x6.Uses == 1
|
||||
&& x7.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& s2.Uses == 1
|
||||
&& s3.Uses == 1
|
||||
&& s4.Uses == 1
|
||||
&& s5.Uses == 1
|
||||
&& s6.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& o1.Uses == 1
|
||||
&& o2.Uses == 1
|
||||
&& o3.Uses == 1
|
||||
&& o4.Uses == 1
|
||||
&& o5.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(x3)
|
||||
&& clobber(x4)
|
||||
&& clobber(x5)
|
||||
&& clobber(x6)
|
||||
&& clobber(x7)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(s2)
|
||||
&& clobber(s3)
|
||||
&& clobber(s4)
|
||||
&& clobber(s5)
|
||||
&& clobber(s6)
|
||||
&& clobber(o0)
|
||||
&& clobber(o1)
|
||||
&& clobber(o2)
|
||||
&& clobber(o3)
|
||||
&& clobber(o4)
|
||||
&& clobber(o5)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem)
|
||||
|
||||
// b[1] | b[0]<<8 -> load 16-bit
|
||||
(ORW x0:(MOVBZloadidx [i] {s} p idx mem)
|
||||
s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
|
||||
(ORW x1:(MOVHZloadidx [i1] {s} p idx mem)
|
||||
sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)))
|
||||
&& i1 == i0+2
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem)
|
||||
|
||||
(OR x1:(MOVHZloadidx [i1] {s} p idx mem)
|
||||
sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)))
|
||||
&& i1 == i0+2
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem)
|
||||
|
||||
(OR x1:(MOVWZloadidx [i1] {s} p idx mem)
|
||||
sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem)))
|
||||
&& i1 == i0+4
|
||||
&& p.Op != OpSB
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem)
|
||||
|
||||
(ORW
|
||||
s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))
|
||||
or:(ORW
|
||||
s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0-8
|
||||
&& j1 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZloadidx <v.Type> [i-1] {s} p idx mem)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZloadidx [i0] {s} p idx mem)) y)
|
||||
|
||||
// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
|
||||
(ORW o0:(ORW x0:(MOVHZloadidx [i] {s} p idx mem)
|
||||
s0:(SLWconst [16] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
|
||||
s1:(SLWconst [24] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
|
||||
(OR
|
||||
s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))
|
||||
or:(OR
|
||||
s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0-8
|
||||
&& j1 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2) != nil
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(o0)
|
||||
-> @mergePoint(b,x0,x1,x2) (MOVWZloadidx <v.Type> [i-2] {s} p idx mem)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZloadidx [i0] {s} p idx mem)) y)
|
||||
|
||||
// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
|
||||
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
|
||||
x0:(MOVBZloadidx [i] {s} p idx mem)
|
||||
s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
|
||||
s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
|
||||
s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
|
||||
s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem)))
|
||||
s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem)))
|
||||
s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem)))
|
||||
s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem)))
|
||||
(OR
|
||||
s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))
|
||||
or:(OR
|
||||
s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem))
|
||||
y))
|
||||
&& i1 == i0+2
|
||||
&& j1 == j0-16
|
||||
&& j1 % 32 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& x2.Uses == 1
|
||||
&& x3.Uses == 1
|
||||
&& x4.Uses == 1
|
||||
&& x5.Uses == 1
|
||||
&& x6.Uses == 1
|
||||
&& x7.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& s2.Uses == 1
|
||||
&& s3.Uses == 1
|
||||
&& s4.Uses == 1
|
||||
&& s5.Uses == 1
|
||||
&& s6.Uses == 1
|
||||
&& o0.Uses == 1
|
||||
&& o1.Uses == 1
|
||||
&& o2.Uses == 1
|
||||
&& o3.Uses == 1
|
||||
&& o4.Uses == 1
|
||||
&& o5.Uses == 1
|
||||
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(x2)
|
||||
&& clobber(x3)
|
||||
&& clobber(x4)
|
||||
&& clobber(x5)
|
||||
&& clobber(x6)
|
||||
&& clobber(x7)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(s2)
|
||||
&& clobber(s3)
|
||||
&& clobber(s4)
|
||||
&& clobber(s5)
|
||||
&& clobber(s6)
|
||||
&& clobber(o0)
|
||||
&& clobber(o1)
|
||||
&& clobber(o2)
|
||||
&& clobber(o3)
|
||||
&& clobber(o4)
|
||||
&& clobber(o5)
|
||||
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <v.Type> [i-7] {s} p idx mem)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZloadidx [i0] {s} p idx mem)) y)
|
||||
|
||||
// Little-endian loads
|
||||
|
||||
(ORW x0:(MOVBZload [i0] {s} p mem)
|
||||
sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
|
||||
|
||||
(OR x0:(MOVBZload [i0] {s} p mem)
|
||||
sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)))
|
||||
&& i1 == i0+1
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
|
||||
|
||||
(ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
|
||||
sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
|
||||
&& i1 == i0+2
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem)
|
||||
|
||||
(OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
|
||||
sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
|
||||
&& i1 == i0+2
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem))
|
||||
|
||||
(OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem))
|
||||
sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))))
|
||||
&& i1 == i0+4
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem)
|
||||
|
||||
(ORW
|
||||
s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
|
||||
or:(ORW
|
||||
s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0+8
|
||||
&& j0 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
|
||||
|
||||
(OR
|
||||
s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
|
||||
or:(OR
|
||||
s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0+8
|
||||
&& j0 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
|
||||
|
||||
(OR
|
||||
s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))
|
||||
or:(OR
|
||||
s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)))
|
||||
y))
|
||||
&& i1 == i0+2
|
||||
&& j1 == j0+16
|
||||
&& j0 % 32 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y)
|
||||
|
||||
// Little-endian indexed loads
|
||||
|
||||
(ORW x0:(MOVBZloadidx [i0] {s} p idx mem)
|
||||
sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)))
|
||||
&& i1 == i0+1
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))
|
||||
|
||||
(OR x0:(MOVBZloadidx [i0] {s} p idx mem)
|
||||
sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)))
|
||||
&& i1 == i0+1
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))
|
||||
|
||||
(ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))
|
||||
sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))))
|
||||
&& i1 == i0+2
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem)
|
||||
|
||||
(OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))
|
||||
sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))))
|
||||
&& i1 == i0+2
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))
|
||||
|
||||
(OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem))
|
||||
sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem))))
|
||||
&& i1 == i0+4
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& sh.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(sh)
|
||||
-> @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem)
|
||||
|
||||
(ORW
|
||||
s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))
|
||||
or:(ORW
|
||||
s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0+8
|
||||
&& j0 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y)
|
||||
|
||||
(OR
|
||||
s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))
|
||||
or:(OR
|
||||
s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))
|
||||
y))
|
||||
&& i1 == i0+1
|
||||
&& j1 == j0+8
|
||||
&& j0 % 16 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y)
|
||||
|
||||
(OR
|
||||
s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))
|
||||
or:(OR
|
||||
s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)))
|
||||
y))
|
||||
&& i1 == i0+2
|
||||
&& j1 == j0+16
|
||||
&& j0 % 32 == 0
|
||||
&& x0.Uses == 1
|
||||
&& x1.Uses == 1
|
||||
&& r0.Uses == 1
|
||||
&& r1.Uses == 1
|
||||
&& s0.Uses == 1
|
||||
&& s1.Uses == 1
|
||||
&& or.Uses == 1
|
||||
&& mergePoint(b,x0,x1) != nil
|
||||
&& clobber(x0)
|
||||
&& clobber(x1)
|
||||
&& clobber(r0)
|
||||
&& clobber(r1)
|
||||
&& clobber(s0)
|
||||
&& clobber(s1)
|
||||
&& clobber(or)
|
||||
-> @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y)
|
||||
|
||||
// Combine stores into store multiples.
|
||||
// 32-bit
|
||||
|
|
|
|||
|
|
@ -216,8 +216,8 @@ func init() {
{name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
{name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
{name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
{name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
{name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
{name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
{name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
{name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
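Marking MULHD and MULHDU commutative is safe because the high half of a product does not depend on operand order, as a quick check with math/bits shows:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	a, b := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
	hi1, _ := bits.Mul64(a, b)
	hi2, _ := bits.Mul64(b, a)
	fmt.Println(hi1 == hi2) // true: the high word commutes as well
}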
@ -265,24 +265,24 @@ func init() {
|
|||
{name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32
|
||||
{name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64
|
||||
|
||||
{name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
|
||||
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32
|
||||
{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
|
||||
{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int32"}, // arg0 << auxint, shift amount 0-31
|
||||
{name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
|
||||
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32
|
||||
{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int8"}, // arg0 << auxint, shift amount 0-63
|
||||
{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int8"}, // arg0 << auxint, shift amount 0-31
|
||||
|
||||
{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
|
||||
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned arg0 >> arg1, shift amount is mod 32
|
||||
{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxint, shift amount 0-63
|
||||
{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int32"}, // unsigned arg0 >> auxint, shift amount 0-31
|
||||
{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
|
||||
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned arg0 >> arg1, shift amount is mod 32
|
||||
{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int8"}, // unsigned arg0 >> auxint, shift amount 0-63
|
||||
{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int8"}, // unsigned arg0 >> auxint, shift amount 0-31
|
||||
|
||||
// Arithmetic shifts clobber flags.
|
||||
{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
|
||||
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
|
||||
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
|
||||
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int32", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
|
||||
{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
|
||||
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
|
||||
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
|
||||
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
|
||||
|
||||
{name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int64"}, // arg0 rotate left auxint, rotate amount 0-63
|
||||
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int32"}, // arg0 rotate left auxint, rotate amount 0-31
|
||||
{name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-63
|
||||
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-31
|
||||
|
||||
// unary ops
|
||||
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0
|
||||
|
|
@ -364,20 +364,20 @@ func init() {
|
|||
|
||||
// indexed loads/stores
|
||||
// TODO(mundaym): add sign-extended indexed loads
|
||||
{name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
|
||||
{name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
|
||||
{name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
|
||||
{name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
|
||||
{name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
|
||||
{name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
|
||||
{name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
|
||||
{name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
|
||||
{name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
|
||||
{name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVDBR", aux: "SymOff", typ: "Int64", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
|
||||
{name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVH", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVD", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
|
||||
{name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVHBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
|
||||
{name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVWBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
|
||||
{name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVDBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
|
||||
|
||||
// For storeconst ops, the AuxInt field encodes both
|
||||
// the value to store and an address offset of the store.
|
||||
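No further detail on that encoding appears in this hunk; as a hedged reconstruction of what packing a value and an address offset into a single AuxInt typically looks like (names and code invented purely for illustration, not part of the diff):

	// packValOff is a hypothetical helper: value in the high 32 bits,
	// store offset in the low 32 bits of the AuxInt.
	func packValOff(val, off int32) int64 {
		return int64(val)<<32 | int64(uint32(off))
	}
	func unpackVal(x int64) int32 { return int32(x >> 32) }
	func unpackOff(x int64) int32 { return int32(x) }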
@ -244,48 +244,12 @@
|
|||
(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) -> (Neq16 (Const16 <t> [int64(int16(c-d))]) x)
|
||||
(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) -> (Neq8 (Const8 <t> [int64(int8(c-d))]) x)
|
||||
|
||||
// canonicalize: swap arguments for commutative operations when one argument is a constant.
|
||||
(Eq64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Eq64 (Const64 <t> [c]) x)
|
||||
(Eq32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Eq32 (Const32 <t> [c]) x)
|
||||
(Eq16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Eq16 (Const16 <t> [c]) x)
|
||||
(Eq8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Eq8 (Const8 <t> [c]) x)
|
||||
|
||||
(Neq64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Neq64 (Const64 <t> [c]) x)
|
||||
(Neq32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Neq32 (Const32 <t> [c]) x)
|
||||
(Neq16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Neq16 (Const16 <t> [c]) x)
|
||||
(Neq8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Neq8 (Const8 <t> [c]) x)
|
||||
|
||||
// AddPtr is not canonicalized because nilcheck ptr checks the first argument to be non-nil.
|
||||
(Add64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Add64 (Const64 <t> [c]) x)
|
||||
(Add32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Add32 (Const32 <t> [c]) x)
|
||||
(Add16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Add16 (Const16 <t> [c]) x)
|
||||
(Add8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Add8 (Const8 <t> [c]) x)
|
||||
|
||||
(Mul64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Mul64 (Const64 <t> [c]) x)
|
||||
(Mul32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Mul32 (Const32 <t> [c]) x)
|
||||
(Mul16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Mul16 (Const16 <t> [c]) x)
|
||||
(Mul8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Mul8 (Const8 <t> [c]) x)
|
||||
|
||||
// Canonicalize x-const to x+(-const)
|
||||
(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Add64 (Const64 <t> [-c]) x)
|
||||
(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Add32 (Const32 <t> [int64(int32(-c))]) x)
|
||||
(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Add16 (Const16 <t> [int64(int16(-c))]) x)
|
||||
(Sub8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Add8 (Const8 <t> [int64(int8(-c))]) x)
|
||||
|
||||
(And64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (And64 (Const64 <t> [c]) x)
|
||||
(And32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (And32 (Const32 <t> [c]) x)
|
||||
(And16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (And16 (Const16 <t> [c]) x)
|
||||
(And8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (And8 (Const8 <t> [c]) x)
|
||||
|
||||
(Or64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Or64 (Const64 <t> [c]) x)
|
||||
(Or32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Or32 (Const32 <t> [c]) x)
|
||||
(Or16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Or16 (Const16 <t> [c]) x)
|
||||
(Or8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Or8 (Const8 <t> [c]) x)
|
||||
|
||||
(Xor64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Xor64 (Const64 <t> [c]) x)
|
||||
(Xor32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Xor32 (Const32 <t> [c]) x)
|
||||
(Xor16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Xor16 (Const16 <t> [c]) x)
|
||||
(Xor8 x (Const8 <t> [c])) && x.Op != OpConst8 -> (Xor8 (Const8 <t> [c]) x)
|
||||
|
||||
// fold negation into comparison operators
|
||||
(Not (Eq64 x y)) -> (Neq64 x y)
|
||||
(Not (Eq32 x y)) -> (Neq32 x y)
|
||||
|
|
@ -635,50 +599,14 @@
|
|||
(And32 x (And32 x y)) -> (And32 x y)
|
||||
(And16 x (And16 x y)) -> (And16 x y)
|
||||
(And8 x (And8 x y)) -> (And8 x y)
|
||||
(And64 x (And64 y x)) -> (And64 x y)
|
||||
(And32 x (And32 y x)) -> (And32 x y)
|
||||
(And16 x (And16 y x)) -> (And16 x y)
|
||||
(And8 x (And8 y x)) -> (And8 x y)
|
||||
(And64 (And64 x y) x) -> (And64 x y)
|
||||
(And32 (And32 x y) x) -> (And32 x y)
|
||||
(And16 (And16 x y) x) -> (And16 x y)
|
||||
(And8 (And8 x y) x) -> (And8 x y)
|
||||
(And64 (And64 x y) y) -> (And64 x y)
|
||||
(And32 (And32 x y) y) -> (And32 x y)
|
||||
(And16 (And16 x y) y) -> (And16 x y)
|
||||
(And8 (And8 x y) y) -> (And8 x y)
|
||||
(Or64 x (Or64 x y)) -> (Or64 x y)
|
||||
(Or32 x (Or32 x y)) -> (Or32 x y)
|
||||
(Or16 x (Or16 x y)) -> (Or16 x y)
|
||||
(Or8 x (Or8 x y)) -> (Or8 x y)
|
||||
(Or64 x (Or64 y x)) -> (Or64 x y)
|
||||
(Or32 x (Or32 y x)) -> (Or32 x y)
|
||||
(Or16 x (Or16 y x)) -> (Or16 x y)
|
||||
(Or8 x (Or8 y x)) -> (Or8 x y)
|
||||
(Or64 (Or64 x y) x) -> (Or64 x y)
|
||||
(Or32 (Or32 x y) x) -> (Or32 x y)
|
||||
(Or16 (Or16 x y) x) -> (Or16 x y)
|
||||
(Or8 (Or8 x y) x) -> (Or8 x y)
|
||||
(Or64 (Or64 x y) y) -> (Or64 x y)
|
||||
(Or32 (Or32 x y) y) -> (Or32 x y)
|
||||
(Or16 (Or16 x y) y) -> (Or16 x y)
|
||||
(Or8 (Or8 x y) y) -> (Or8 x y)
|
||||
(Xor64 x (Xor64 x y)) -> y
|
||||
(Xor32 x (Xor32 x y)) -> y
|
||||
(Xor16 x (Xor16 x y)) -> y
|
||||
(Xor8 x (Xor8 x y)) -> y
|
||||
(Xor64 x (Xor64 y x)) -> y
|
||||
(Xor32 x (Xor32 y x)) -> y
|
||||
(Xor16 x (Xor16 y x)) -> y
|
||||
(Xor8 x (Xor8 y x)) -> y
|
||||
(Xor64 (Xor64 x y) x) -> y
|
||||
(Xor32 (Xor32 x y) x) -> y
|
||||
(Xor16 (Xor16 x y) x) -> y
|
||||
(Xor8 (Xor8 x y) x) -> y
|
||||
(Xor64 (Xor64 x y) y) -> x
|
||||
(Xor32 (Xor32 x y) y) -> x
|
||||
(Xor16 (Xor16 x y) y) -> x
|
||||
(Xor8 (Xor8 x y) y) -> x
|
||||
|
||||
(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF -> (Trunc64to8 x)
|
||||
(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF -> (Trunc64to16 x)
|
||||
|
|
@ -732,9 +660,7 @@
|
|||
|
||||
// user nil checks
|
||||
(NeqPtr p (ConstNil)) -> (IsNonNil p)
|
||||
(NeqPtr (ConstNil) p) -> (IsNonNil p)
|
||||
(EqPtr p (ConstNil)) -> (Not (IsNonNil p))
|
||||
(EqPtr (ConstNil) p) -> (Not (IsNonNil p))
|
||||
(IsNonNil (ConstNil)) -> (ConstBool [0])
|
||||
|
||||
// slice and interface comparisons
|
||||
|
|
@ -912,7 +838,6 @@
|
|||
|
||||
// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
|
||||
(Convert (Add64 (Convert ptr mem) off) mem) -> (Add64 ptr off)
|
||||
(Convert (Add64 off (Convert ptr mem)) mem) -> (Add64 ptr off)
|
||||
(Convert (Convert ptr mem) mem) -> ptr
|
||||
|
||||
// Decompose compound argument values
|
||||
|
|
@ -1238,35 +1163,11 @@
|
|||
// Reassociate expressions involving
// constants such that constants come first,
// exposing obvious constant-folding opportunities.
// First, re-write (op x (op y z)) to (op (op y z) x) if
// the op is commutative, to reduce the number of subsequent
// matching rules for folding. Then, reassociate
// (op (op y C) x) to (op C (op x y)) or similar, where C
// Reassociate (op (op y C) x) to (op C (op x y)) or similar, where C
// is constant, which pushes constants to the outside
// of the expression. At that point, any constant-folding
// opportunities should be obvious.
|
||||
(Add64 x l:(Add64 _ _)) && (x.Op != OpAdd64 && x.Op != OpConst64) -> (Add64 l x)
|
||||
(Add32 x l:(Add32 _ _)) && (x.Op != OpAdd32 && x.Op != OpConst32) -> (Add32 l x)
|
||||
(Add16 x l:(Add16 _ _)) && (x.Op != OpAdd16 && x.Op != OpConst16) -> (Add16 l x)
|
||||
(Add8 x l:(Add8 _ _)) && (x.Op != OpAdd8 && x.Op != OpConst8) -> (Add8 l x)
|
||||
(And64 x l:(And64 _ _)) && (x.Op != OpAnd64 && x.Op != OpConst64) -> (And64 l x)
|
||||
(And32 x l:(And32 _ _)) && (x.Op != OpAnd32 && x.Op != OpConst32) -> (And32 l x)
|
||||
(And16 x l:(And16 _ _)) && (x.Op != OpAnd16 && x.Op != OpConst16) -> (And16 l x)
|
||||
(And8 x l:(And8 _ _)) && (x.Op != OpAnd8 && x.Op != OpConst8) -> (And8 l x)
|
||||
(Or64 x l:(Or64 _ _)) && (x.Op != OpOr64 && x.Op != OpConst64) -> (Or64 l x)
|
||||
(Or32 x l:(Or32 _ _)) && (x.Op != OpOr32 && x.Op != OpConst32) -> (Or32 l x)
|
||||
(Or16 x l:(Or16 _ _)) && (x.Op != OpOr16 && x.Op != OpConst16) -> (Or16 l x)
|
||||
(Or8 x l:(Or8 _ _)) && (x.Op != OpOr8 && x.Op != OpConst8) -> (Or8 l x)
|
||||
(Xor64 x l:(Xor64 _ _)) && (x.Op != OpXor64 && x.Op != OpConst64) -> (Xor64 l x)
|
||||
(Xor32 x l:(Xor32 _ _)) && (x.Op != OpXor32 && x.Op != OpConst32) -> (Xor32 l x)
|
||||
(Xor16 x l:(Xor16 _ _)) && (x.Op != OpXor16 && x.Op != OpConst16) -> (Xor16 l x)
|
||||
(Xor8 x l:(Xor8 _ _)) && (x.Op != OpXor8 && x.Op != OpConst8) -> (Xor8 l x)
|
||||
(Mul64 x l:(Mul64 _ _)) && (x.Op != OpMul64 && x.Op != OpConst64) -> (Mul64 l x)
|
||||
(Mul32 x l:(Mul32 _ _)) && (x.Op != OpMul32 && x.Op != OpConst32) -> (Mul32 l x)
|
||||
(Mul16 x l:(Mul16 _ _)) && (x.Op != OpMul16 && x.Op != OpConst16) -> (Mul16 l x)
|
||||
(Mul8 x l:(Mul8 _ _)) && (x.Op != OpMul8 && x.Op != OpConst8) -> (Mul8 l x)
|
||||
|
||||
// x + (C + z) -> C + (x + z)
|
||||
(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) -> (Add64 i (Add64 <t> z x))
|
||||
(Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) -> (Add32 i (Add32 <t> z x))
|
||||
|
|
@ -1379,19 +1280,13 @@
|
|||
|
||||
// floating point optimizations
|
||||
(Add32F x (Const32F [0])) -> x
|
||||
(Add32F (Const32F [0]) x) -> x
|
||||
(Add64F x (Const64F [0])) -> x
|
||||
(Add64F (Const64F [0]) x) -> x
|
||||
(Sub32F x (Const32F [0])) -> x
|
||||
(Sub64F x (Const64F [0])) -> x
|
||||
(Mul32F x (Const32F [f2i(1)])) -> x
|
||||
(Mul32F (Const32F [f2i(1)]) x) -> x
|
||||
(Mul64F x (Const64F [f2i(1)])) -> x
|
||||
(Mul64F (Const64F [f2i(1)]) x) -> x
|
||||
(Mul32F x (Const32F [f2i(-1)])) -> (Neg32F x)
|
||||
(Mul32F (Const32F [f2i(-1)]) x) -> (Neg32F x)
|
||||
(Mul64F x (Const64F [f2i(-1)])) -> (Neg64F x)
|
||||
(Mul64F (Const64F [f2i(-1)]) x) -> (Neg64F x)
|
||||
(Div32F x (Const32F [f2i(1)])) -> x
|
||||
(Div64F x (Const64F [f2i(1)])) -> x
|
||||
(Div32F x (Const32F [f2i(-1)])) -> (Neg32F x)
|
||||
|
|
|
|||
|
|
@ -28,8 +28,8 @@ var genericOps = []opData{
|
|||
{name: "Add32", argLength: 2, commutative: true},
|
||||
{name: "Add64", argLength: 2, commutative: true},
|
||||
{name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
|
||||
{name: "Add32F", argLength: 2},
|
||||
{name: "Add64F", argLength: 2},
|
||||
{name: "Add32F", argLength: 2, commutative: true},
|
||||
{name: "Add64F", argLength: 2, commutative: true},
|
||||
|
||||
{name: "Sub8", argLength: 2}, // arg0 - arg1
|
||||
{name: "Sub16", argLength: 2},
|
||||
|
|
@ -43,24 +43,25 @@ var genericOps = []opData{
|
|||
{name: "Mul16", argLength: 2, commutative: true},
|
||||
{name: "Mul32", argLength: 2, commutative: true},
|
||||
{name: "Mul64", argLength: 2, commutative: true},
|
||||
{name: "Mul32F", argLength: 2},
|
||||
{name: "Mul64F", argLength: 2},
|
||||
{name: "Mul32F", argLength: 2, commutative: true},
|
||||
{name: "Mul64F", argLength: 2, commutative: true},
|
||||
|
||||
{name: "Div32F", argLength: 2}, // arg0 / arg1
|
||||
{name: "Div64F", argLength: 2},
|
||||
|
||||
{name: "Hmul32", argLength: 2},
|
||||
{name: "Hmul32u", argLength: 2},
|
||||
{name: "Hmul64", argLength: 2},
|
||||
{name: "Hmul64u", argLength: 2},
|
||||
{name: "Hmul32", argLength: 2, commutative: true},
|
||||
{name: "Hmul32u", argLength: 2, commutative: true},
|
||||
{name: "Hmul64", argLength: 2, commutative: true},
|
||||
{name: "Hmul64u", argLength: 2, commutative: true},
|
||||
|
||||
{name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)"}, // arg0 * arg1, returns (hi, lo)
|
||||
{name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)"}, // arg0 * arg1, returns (hi, lo)
|
||||
{name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
|
||||
{name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
|
||||
|
||||
// Weird special instructions for use in the strength reduction of divides.
|
||||
// These ops compute unsigned (arg0 + arg1) / 2, correct to all
|
||||
// 32/64 bits, even when the intermediate result of the add has 33/65 bits.
|
||||
// These ops can assume arg0 >= arg1.
|
||||
// Note: these ops aren't commutative!
|
||||
{name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
|
||||
{name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
|
||||
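The averaging trick those two ops rely on can be sketched in plain Go (a hedged illustration of one standard way to compute it, not necessarily the exact lowering the compiler emits):

	// avgu computes (a + b) / 2 without a 65-bit intermediate,
	// relying on the precondition a >= b stated in the comment above.
	func avgu(a, b uint64) uint64 {
		return b + (a-b)/2
	}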
|
||||
|
|
@ -159,8 +160,8 @@ var genericOps = []opData{
|
|||
{name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
|
||||
{name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
|
||||
{name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
|
||||
{name: "Eq32F", argLength: 2, typ: "Bool"},
|
||||
{name: "Eq64F", argLength: 2, typ: "Bool"},
|
||||
{name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"},
|
||||
{name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"},
|
||||
|
||||
{name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
|
||||
{name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
|
||||
|
|
@ -169,8 +170,8 @@ var genericOps = []opData{
|
|||
{name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
|
||||
{name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
|
||||
{name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
|
||||
{name: "Neq32F", argLength: 2, typ: "Bool"},
|
||||
{name: "Neq64F", argLength: 2},
|
||||
{name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"},
|
||||
{name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"},
|
||||
|
||||
{name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
|
||||
{name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
|
||||
@ -30,7 +30,7 @@ import (
// sexpr [&& extra conditions] -> [@block] sexpr
//
// sexpr are s-expressions (lisp-like parenthesized groupings)
// sexpr ::= (opcode sexpr*)
// sexpr ::= [variable:](opcode sexpr*)
//         | variable
//         | <type>
//         | [auxint]
@ -39,7 +39,7 @@ import (
// aux ::= variable | {code}
// type ::= variable | {code}
// variable ::= some token
// opcode ::= one of the opcodes from ../op.go (without the Op prefix)
// opcode ::= one of the opcodes from the *Ops.go files

// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
// variables declared in the matching sexpr. The variable "v" is predefined to be
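To make the grammar concrete, here is an annotated reading of one generic rule that appears further down in this change (the annotation is editorial, not part of the diff):

	(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) -> (Add64 i (Add64 <t> z x))

Add64 and Const64 are opcodes, x and z are variables, i: binds a variable to a sub-sexpr, <t> is a type, the && clause is the extra Go condition, and everything after -> is the result sexpr.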

@ -119,15 +119,17 @@ func genRules(arch arch) {
}

loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno)
r := Rule{rule: rule, loc: loc}
if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) {
blockrules[rawop] = append(blockrules[rawop], r)
} else {
// Do fancier value op matching.
match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
for _, crule := range commute(rule, arch) {
r := Rule{rule: crule, loc: loc}
if rawop := strings.Split(crule, " ")[0][1:]; isBlock(rawop, arch) {
blockrules[rawop] = append(blockrules[rawop], r)
} else {
// Do fancier value op matching.
match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
}
}
rule = ""
ruleLineno = 0
@ -752,3 +754,169 @@ func isVariable(s string) bool {
	}
	return b
}

// commute returns all equivalent rules to r after applying all possible
// argument swaps to the commutable ops in r.
// Potentially exponential, be careful.
func commute(r string, arch arch) []string {
	match, cond, result := Rule{rule: r}.parse()
	a := commute1(match, varCount(match), arch)
	for i, m := range a {
		if cond != "" {
			m += " && " + cond
		}
		m += " -> " + result
		a[i] = m
	}
	if len(a) == 1 && normalizeWhitespace(r) != normalizeWhitespace(a[0]) {
		fmt.Println(normalizeWhitespace(r))
		fmt.Println(normalizeWhitespace(a[0]))
		panic("commute() is not the identity for noncommuting rule")
	}
	if false && len(a) > 1 {
		fmt.Println(r)
		for _, x := range a {
			fmt.Println(" " + x)
		}
	}
	return a
}
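As a concrete illustration of what commute produces (inferred from the generated MIPS rewrites later in this diff, not an example spelled out in the change): given a source rule such as

	(NOR x (MOVWconst [c])) -> (NORconst [c] x)

for an op that is marked commutative, commute returns that rule plus its swapped-argument twin (NOR (MOVWconst [c]) x) -> (NORconst [c] x), and rewriteValueMIPS_OpMIPSNOR below ends up with one match block for each ordering.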

func commute1(m string, cnt map[string]int, arch arch) []string {
	if m[0] == '<' || m[0] == '[' || m[0] == '{' || isVariable(m) {
		return []string{m}
	}
	// Split up input.
	var prefix string
	colon := strings.Index(m, ":")
	if colon >= 0 && isVariable(m[:colon]) {
		prefix = m[:colon+1]
		m = m[colon+1:]
	}
	if m[0] != '(' || m[len(m)-1] != ')' {
		panic("non-compound expr in commute1: " + m)
	}
	s := split(m[1 : len(m)-1])
	op := s[0]

	// Figure out if the op is commutative or not.
	commutative := false
	for _, x := range genericOps {
		if op == x.name {
			if x.commutative {
				commutative = true
			}
			break
		}
	}
	if arch.name != "generic" {
		for _, x := range arch.ops {
			if op == x.name {
				if x.commutative {
					commutative = true
				}
				break
			}
		}
	}
	var idx0, idx1 int
	if commutative {
		// Find indexes of two args we can swap.
		for i, arg := range s {
			if i == 0 || arg[0] == '<' || arg[0] == '[' || arg[0] == '{' {
				continue
			}
			if idx0 == 0 {
				idx0 = i
				continue
			}
			if idx1 == 0 {
				idx1 = i
				break
			}
		}
		if idx1 == 0 {
			panic("couldn't find first two args of commutative op " + s[0])
		}
		if cnt[s[idx0]] == 1 && cnt[s[idx1]] == 1 || s[idx0] == s[idx1] && cnt[s[idx0]] == 2 {
			// When we have (Add x y) with no other uses of x and y in the matching rule,
			// then we can skip the commutative match (Add y x).
			commutative = false
		}
	}

	// Recursively commute arguments.
	a := make([][]string, len(s))
	for i, arg := range s {
		a[i] = commute1(arg, cnt, arch)
	}

	// Choose all possibilities from all args.
	r := crossProduct(a)

	// If commutative, do that again with its two args reversed.
	if commutative {
		a[idx0], a[idx1] = a[idx1], a[idx0]
		r = append(r, crossProduct(a)...)
	}

	// Construct result.
	for i, x := range r {
		r[i] = prefix + "(" + x + ")"
	}
	return r
}

// varCount returns a map which counts the number of occurrences of
// Value variables in m.
func varCount(m string) map[string]int {
	cnt := map[string]int{}
	varCount1(m, cnt)
	return cnt
}
func varCount1(m string, cnt map[string]int) {
	if m[0] == '<' || m[0] == '[' || m[0] == '{' {
		return
	}
	if isVariable(m) {
		cnt[m]++
		return
	}
	// Split up input.
	colon := strings.Index(m, ":")
	if colon >= 0 && isVariable(m[:colon]) {
		cnt[m[:colon]]++
		m = m[colon+1:]
	}
	if m[0] != '(' || m[len(m)-1] != ')' {
		panic("non-compound expr in commute1: " + m)
	}
	s := split(m[1 : len(m)-1])
	for _, arg := range s[1:] {
		varCount1(arg, cnt)
	}
}

// crossProduct returns all possible values
// x[0][i] + " " + x[1][j] + " " + ... + " " + x[len(x)-1][k]
// for all valid values of i, j, ..., k.
func crossProduct(x [][]string) []string {
	if len(x) == 1 {
		return x[0]
	}
	var r []string
	for _, tail := range crossProduct(x[1:]) {
		for _, first := range x[0] {
			r = append(r, first+" "+tail)
		}
	}
	return r
}
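For instance (an illustrative call, not taken from the diff), crossProduct([][]string{{"a", "b"}, {"c"}}) returns []string{"a c", "b c"}; commute1 hands it one slice of alternatives per operand, so every combination of independently commuted sub-expressions gets enumerated.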

// normalizeWhitespace replaces 2+ whitespace sequences with a single space.
func normalizeWhitespace(x string) string {
	x = strings.Join(strings.Fields(x), " ")
	x = strings.Replace(x, "( ", "(", -1)
	x = strings.Replace(x, " )", ")", -1)
	return x
}
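A quick worked example (illustrative input, not from the diff): normalizeWhitespace("(ADDL  x   (MOVLconst [c]) )") collapses the space runs and the space before the closing parenthesis to give "(ADDL x (MOVLconst [c]))", which is what lets commute compare a rule against its supposedly identical single rewrite above.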
|
||||
|
|
|
|||
|
|
@ -2475,6 +2475,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "HMULL",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AIMULL,
|
||||
reg: regInfo{
|
||||
|
|
@ -2491,6 +2492,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "HMULLU",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AMULL,
|
||||
reg: regInfo{
|
||||
|
|
@ -2507,6 +2509,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "MULLQU",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AMULL,
|
||||
reg: regInfo{
|
||||
|
|
@ -2854,9 +2857,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTL",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTL,
|
||||
name: "TESTL",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -2865,9 +2869,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTW",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTW,
|
||||
name: "TESTW",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -2876,9 +2881,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTB",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTB,
|
||||
name: "TESTB",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTB,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3658,10 +3664,11 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "LEAL1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
symEffect: SymAddr,
|
||||
name: "LEAL1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
symEffect: SymAddr,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3840,11 +3847,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVBloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVBLZX,
|
||||
name: "MOVBloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVBLZX,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3856,11 +3864,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVWloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVWLZX,
|
||||
name: "MOVWloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVWLZX,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3888,11 +3897,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVLloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVL,
|
||||
name: "MOVLloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3920,11 +3930,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVBstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVB,
|
||||
name: "MOVBstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVB,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3934,11 +3945,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVWstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVW,
|
||||
name: "MOVWstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -3962,11 +3974,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVLstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVL,
|
||||
name: "MOVLstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 255}, // AX CX DX BX SP BP SI DI
|
||||
|
|
@ -4949,6 +4962,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "HMULQ",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AIMULQ,
|
||||
reg: regInfo{
|
||||
|
|
@ -4965,6 +4979,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "HMULL",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AIMULL,
|
||||
reg: regInfo{
|
||||
|
|
@ -4981,6 +4996,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "HMULQU",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AMULQ,
|
||||
reg: regInfo{
|
||||
|
|
@ -4997,6 +5013,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "HMULLU",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AMULL,
|
||||
reg: regInfo{
|
||||
|
|
@ -5125,6 +5142,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "MULQU2",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AMULQ,
|
||||
reg: regInfo{
|
||||
|
|
@ -5508,9 +5526,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTQ",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTQ,
|
||||
name: "TESTQ",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTQ,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -5519,9 +5538,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTL",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTL,
|
||||
name: "TESTL",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -5530,9 +5550,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTW",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTW,
|
||||
name: "TESTW",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -5541,9 +5562,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "TESTB",
|
||||
argLen: 2,
|
||||
asm: x86.ATESTB,
|
||||
name: "TESTB",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: x86.ATESTB,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -5629,7 +5651,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SHLQconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5645,7 +5667,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SHLLconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5725,7 +5747,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SHRQconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5741,7 +5763,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SHRLconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5757,7 +5779,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SHRWconst",
|
||||
auxType: auxInt16,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5853,7 +5875,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SARQconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5869,7 +5891,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SARLconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5885,7 +5907,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SARWconst",
|
||||
auxType: auxInt16,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5917,7 +5939,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "ROLQconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5933,7 +5955,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "ROLLconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -5949,7 +5971,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "ROLWconst",
|
||||
auxType: auxInt16,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
|
|
@ -6807,10 +6829,11 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "LEAQ1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
symEffect: SymAddr,
|
||||
name: "LEAQ1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
symEffect: SymAddr,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7081,11 +7104,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVBloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVBLZX,
|
||||
name: "MOVBloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVBLZX,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7097,11 +7121,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVWloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVWLZX,
|
||||
name: "MOVWloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVWLZX,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7129,11 +7154,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVLloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVL,
|
||||
name: "MOVLloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7161,11 +7187,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVQloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVQ,
|
||||
name: "MOVQloadidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
symEffect: SymRead,
|
||||
asm: x86.AMOVQ,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7193,11 +7220,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVBstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVB,
|
||||
name: "MOVBstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVB,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7207,11 +7235,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVWstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVW,
|
||||
name: "MOVWstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7235,11 +7264,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVLstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVL,
|
||||
name: "MOVLstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -7263,11 +7293,12 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVQstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVQ,
|
||||
name: "MOVQstoreidx1",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
symEffect: SymWrite,
|
||||
asm: x86.AMOVQ,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
|
||||
|
|
@ -16534,9 +16565,10 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "NOR",
|
||||
argLen: 2,
|
||||
asm: ppc64.ANOR,
|
||||
name: "NOR",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
asm: ppc64.ANOR,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
|
||||
|
|
@ -18282,6 +18314,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "MULHD",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
asm: s390x.AMULHD,
|
||||
|
|
@ -18298,6 +18331,7 @@ var opcodeTable = [...]opInfo{
|
|||
{
|
||||
name: "MULHDU",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
asm: s390x.AMULHDU,
|
||||
|
|
@ -18885,7 +18919,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SLDconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
asm: s390x.ASLD,
|
||||
reg: regInfo{
|
||||
|
|
@ -18899,7 +18933,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SLWconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
asm: s390x.ASLW,
|
||||
reg: regInfo{
|
||||
|
|
@ -18941,7 +18975,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SRDconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
asm: s390x.ASRD,
|
||||
reg: regInfo{
|
||||
|
|
@ -18955,7 +18989,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SRWconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
asm: s390x.ASRW,
|
||||
reg: regInfo{
|
||||
|
|
@ -18999,7 +19033,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SRADconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
clobberFlags: true,
|
||||
asm: s390x.ASRAD,
|
||||
|
|
@ -19014,7 +19048,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "SRAWconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
clobberFlags: true,
|
||||
asm: s390x.ASRAW,
|
||||
|
|
@ -19029,7 +19063,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "RLLGconst",
|
||||
auxType: auxInt64,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
asm: s390x.ARLLG,
|
||||
reg: regInfo{
|
||||
|
|
@ -19043,7 +19077,7 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
{
|
||||
name: "RLLconst",
|
||||
auxType: auxInt32,
|
||||
auxType: auxInt8,
|
||||
argLen: 1,
|
||||
asm: s390x.ARLL,
|
||||
reg: regInfo{
|
||||
|
|
@ -19863,6 +19897,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVBZloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVBZ,
|
||||
|
|
@ -19880,6 +19915,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVHZloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVHZ,
|
||||
|
|
@ -19897,6 +19933,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVWZloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVWZ,
|
||||
|
|
@ -19914,6 +19951,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVDloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVD,
|
||||
|
|
@ -19931,6 +19969,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVHBRloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVHBR,
|
||||
|
|
@ -19948,6 +19987,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVWBRloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVWBR,
|
||||
|
|
@ -19965,6 +20005,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVDBRloadidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 3,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymRead,
|
||||
asm: s390x.AMOVDBR,
|
||||
|
|
@ -19982,6 +20023,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVBstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVB,
|
||||
|
|
@ -19997,6 +20039,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVHstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVH,
|
||||
|
|
@ -20012,6 +20055,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVWstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVW,
|
||||
|
|
@ -20027,6 +20071,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVDstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVD,
|
||||
|
|
@ -20042,6 +20087,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVHBRstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVHBR,
|
||||
|
|
@ -20057,6 +20103,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVWBRstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVWBR,
|
||||
|
|
@ -20072,6 +20119,7 @@ var opcodeTable = [...]opInfo{
|
|||
name: "MOVDBRstoreidx",
|
||||
auxType: auxSymOff,
|
||||
argLen: 4,
|
||||
commutative: true,
|
||||
clobberFlags: true,
|
||||
symEffect: SymWrite,
|
||||
asm: s390x.AMOVDBR,
|
||||
|
|
@ -20646,14 +20694,16 @@ var opcodeTable = [...]opInfo{
|
|||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Add32F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Add32F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Add64F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Add64F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Sub8",
|
||||
|
|
@ -20715,14 +20765,16 @@ var opcodeTable = [...]opInfo{
|
|||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Mul32F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Mul32F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Mul64F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Mul64F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Div32F",
|
||||
|
|
@ -20735,34 +20787,40 @@ var opcodeTable = [...]opInfo{
|
|||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Hmul32",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Hmul32",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Hmul32u",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Hmul32u",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Hmul64",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Hmul64",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Hmul64u",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Hmul64u",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Mul32uhilo",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Mul32uhilo",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Mul64uhilo",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Mul64uhilo",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Avg32u",
|
||||
|
|
@ -21212,14 +21270,16 @@ var opcodeTable = [...]opInfo{
|
|||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Eq32F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Eq32F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Eq64F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Eq64F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Neq8",
|
||||
|
|
@ -21262,14 +21322,16 @@ var opcodeTable = [...]opInfo{
|
|||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Neq32F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Neq32F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Neq64F",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "Neq64F",
|
||||
argLen: 2,
|
||||
commutative: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "Less8",
|
||||
|
|
|
|||
[4 file diffs suppressed because they are too large]
|
|
@ -686,7 +686,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
|
|||
_ = config
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (AtomicAnd8 ptr val mem)
|
||||
// match: (AtomicAnd8 ptr val mem)
|
||||
// cond: !config.BigEndian
|
||||
// result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))))) mem)
|
||||
for {
|
||||
|
|
@ -735,7 +735,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
|
|||
v.AddArg(mem)
|
||||
return true
|
||||
}
|
||||
// match: (AtomicAnd8 ptr val mem)
|
||||
// match: (AtomicAnd8 ptr val mem)
|
||||
// cond: config.BigEndian
|
||||
// result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
|
||||
for {
|
||||
|
|
@ -825,7 +825,7 @@ func rewriteValueMIPS_OpAtomicExchange32(v *Value) bool {
|
|||
}
|
||||
}
|
||||
func rewriteValueMIPS_OpAtomicLoad32(v *Value) bool {
|
||||
// match: (AtomicLoad32 ptr mem)
|
||||
// match: (AtomicLoad32 ptr mem)
|
||||
// cond:
|
||||
// result: (LoweredAtomicLoad ptr mem)
|
||||
for {
|
||||
|
|
@ -927,7 +927,7 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpAtomicStore32(v *Value) bool {
|
||||
// match: (AtomicStore32 ptr val mem)
|
||||
// match: (AtomicStore32 ptr val mem)
|
||||
// cond:
|
||||
// result: (LoweredAtomicStore ptr val mem)
|
||||
for {
|
||||
|
|
@ -2777,21 +2777,6 @@ func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
|
|||
}
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
|
||||
// match: (ADD (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (ADDconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSADDconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADD x (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (ADDconst [c] x)
|
||||
|
|
@ -2807,6 +2792,21 @@ func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADD (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (ADDconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSADDconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADD x (NEG y))
|
||||
// cond:
|
||||
// result: (SUB x y)
|
||||
|
|
@ -2858,7 +2858,7 @@ func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool {
|
|||
v.AddArg(ptr)
|
||||
return true
|
||||
}
|
||||
// match: (ADDconst [0] x)
|
||||
// match: (ADDconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -2922,21 +2922,6 @@ func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool {
|
|||
func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
|
||||
b := v.Block
|
||||
_ = b
|
||||
// match: (AND (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (ANDconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSANDconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (AND x (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (ANDconst [c] x)
|
||||
|
|
@ -2952,6 +2937,21 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (AND (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (ANDconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSANDconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (AND x x)
|
||||
// cond:
|
||||
// result: x
|
||||
|
|
@ -2993,10 +2993,38 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
|
|||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (AND (SGTUconst [1] y) (SGTUconst [1] x))
|
||||
// cond:
|
||||
// result: (SGTUconst [1] (OR <x.Type> x y))
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSSGTUconst {
|
||||
break
|
||||
}
|
||||
if v_0.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
y := v_0.Args[0]
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSSGTUconst {
|
||||
break
|
||||
}
|
||||
if v_1.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
x := v_1.Args[0]
|
||||
v.reset(OpMIPSSGTUconst)
|
||||
v.AuxInt = 1
|
||||
v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
|
||||
v0.AddArg(x)
|
||||
v0.AddArg(y)
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool {
|
||||
// match: (ANDconst [0] _)
|
||||
// match: (ANDconst [0] _)
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
|
|
@ -3347,7 +3375,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
|
||||
// match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// cond: (is16Bit(off1+off2) || x.Uses == 1)
|
||||
// result: (MOVBload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -3490,7 +3518,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg (MOVWconst [c]))
|
||||
// match: (MOVBreg (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int8(c))])
|
||||
for {
|
||||
|
|
@ -3737,7 +3765,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
|
||||
// match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// cond: (is16Bit(off1+off2) || x.Uses == 1)
|
||||
// result: (MOVDload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -3864,7 +3892,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
|
||||
// match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// cond: (is16Bit(off1+off2) || x.Uses == 1)
|
||||
// result: (MOVFload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4171,7 +4199,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
|
||||
// match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// cond: (is16Bit(off1+off2) || x.Uses == 1)
|
||||
// result: (MOVHload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4362,7 +4390,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg (MOVWconst [c]))
|
||||
// match: (MOVHreg (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int16(c))])
|
||||
for {
|
||||
|
|
@ -4567,7 +4595,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
|
||||
// match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
|
||||
// cond: (is16Bit(off1+off2) || x.Uses == 1)
|
||||
// result: (MOVWload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4652,7 +4680,7 @@ func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg (MOVWconst [c]))
|
||||
// match: (MOVWreg (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (MOVWconst [c])
|
||||
for {
|
||||
|
|
@ -4815,7 +4843,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
|
||||
// match: (MUL (MOVWconst [0]) _ )
|
||||
// match: (MUL (MOVWconst [0]) _)
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
|
|
@ -4830,7 +4858,22 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
|
|||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [1]) x )
|
||||
// match: (MUL _ (MOVWconst [0]))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_1.AuxInt != 0 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [1]) x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -4847,7 +4890,24 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [-1]) x )
|
||||
// match: (MUL x (MOVWconst [1]))
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
x := v.Args[0]
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_1.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpCopy)
|
||||
v.Type = x.Type
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [-1]) x)
|
||||
// cond:
|
||||
// result: (NEG x)
|
||||
for {
|
||||
|
|
@ -4863,7 +4923,23 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [c]) x )
|
||||
// match: (MUL x (MOVWconst [-1]))
|
||||
// cond:
|
||||
// result: (NEG x)
|
||||
for {
|
||||
x := v.Args[0]
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_1.AuxInt != -1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSNEG)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [c]) x)
|
||||
// cond: isPowerOfTwo(int64(uint32(c)))
|
||||
// result: (SLLconst [log2(int64(uint32(c)))] x)
|
||||
for {
|
||||
|
|
@ -4881,6 +4957,24 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MUL x (MOVWconst [c]))
|
||||
// cond: isPowerOfTwo(int64(uint32(c)))
|
||||
// result: (SLLconst [log2(int64(uint32(c)))] x)
|
||||
for {
|
||||
x := v.Args[0]
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_1.AuxInt
|
||||
if !(isPowerOfTwo(int64(uint32(c)))) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSSLLconst)
|
||||
v.AuxInt = log2(int64(uint32(c)))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [c]) (MOVWconst [d]))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int32(c)*int32(d))])
|
||||
|
|
@ -4899,6 +4993,24 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
|
|||
v.AuxInt = int64(int32(c) * int32(d))
|
||||
return true
|
||||
}
|
||||
// match: (MUL (MOVWconst [d]) (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int32(c)*int32(d))])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
d := v_0.AuxInt
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_1.AuxInt
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = int64(int32(c) * int32(d))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
|
||||
|
|
@ -4918,21 +5030,6 @@ func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
|
||||
// match: (NOR (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (NORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSNORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (NOR x (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (NORconst [c] x)
|
||||
|
|
@ -4948,6 +5045,21 @@ func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (NOR (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (NORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSNORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
|
||||
|
|
@ -4970,22 +5082,7 @@ func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
|
|||
func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
|
||||
b := v.Block
|
||||
_ = b
|
||||
// match: (OR (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (ORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (OR x (MOVWconst [c]))
|
||||
// match: (OR x (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (ORconst [c] x)
|
||||
for {
|
||||
|
|
@ -5000,7 +5097,22 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (OR x x)
|
||||
// match: (OR (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (ORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (OR x x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -5034,10 +5146,31 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
|
|||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (OR (SGTUzero y) (SGTUzero x))
|
||||
// cond:
|
||||
// result: (SGTUzero (OR <x.Type> x y))
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSSGTUzero {
|
||||
break
|
||||
}
|
||||
y := v_0.Args[0]
|
||||
v_1 := v.Args[1]
|
||||
if v_1.Op != OpMIPSSGTUzero {
|
||||
break
|
||||
}
|
||||
x := v_1.Args[0]
|
||||
v.reset(OpMIPSSGTUzero)
|
||||
v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
|
||||
v0.AddArg(x)
|
||||
v0.AddArg(y)
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
|
||||
// match: (ORconst [0] x)
|
||||
// match: (ORconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -5050,7 +5183,7 @@ func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ORconst [-1] _)
|
||||
// match: (ORconst [-1] _)
|
||||
// cond:
|
||||
// result: (MOVWconst [-1])
|
||||
for {
|
||||
|
|
@ -5094,7 +5227,7 @@ func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSSGT(v *Value) bool {
|
||||
// match: (SGT (MOVWconst [c]) x)
|
||||
// match: (SGT (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (SGTconst [c] x)
|
||||
for {
|
||||
|
|
@ -5735,7 +5868,7 @@ func rewriteValueMIPS_OpMIPSSUB(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool {
|
||||
// match: (SUBconst [0] x)
|
||||
// match: (SUBconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -5797,21 +5930,6 @@ func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
|
||||
// match: (XOR (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (XORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSXORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XOR x (MOVWconst [c]))
|
||||
// cond:
|
||||
// result: (XORconst [c] x)
|
||||
|
|
@ -5827,6 +5945,21 @@ func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XOR (MOVWconst [c]) x)
|
||||
// cond:
|
||||
// result: (XORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
v.reset(OpMIPSXORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XOR x x)
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
|
|
@ -5842,7 +5975,7 @@ func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool {
|
||||
// match: (XORconst [0] x)
|
||||
// match: (XORconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -7777,7 +7910,33 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
|
|||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [0]) _ ))
|
||||
// match: (Select0 (MULTU (MOVWconst [c]) x))
|
||||
// cond: x.Op != OpMIPSMOVWconst
|
||||
// result: (Select0 (MULTU (MOVWconst [c]) x ))
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0_0.AuxInt
|
||||
x := v_0.Args[1]
|
||||
if !(x.Op != OpMIPSMOVWconst) {
|
||||
break
|
||||
}
|
||||
v.reset(OpSelect0)
|
||||
v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(types.UInt32, types.UInt32))
|
||||
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
|
||||
v1.AuxInt = c
|
||||
v0.AddArg(v1)
|
||||
v0.AddArg(x)
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [0]) _))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
|
|
@ -7796,7 +7955,26 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
|
|||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [1]) _ ))
|
||||
// match: (Select0 (MULTU _ (MOVWconst [0])))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != 0 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [1]) _))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
|
|
@ -7815,7 +7993,26 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
|
|||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [-1]) x ))
|
||||
// match: (Select0 (MULTU _ (MOVWconst [1])))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [-1]) x))
|
||||
// cond:
|
||||
// result: (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
|
||||
for {
|
||||
|
|
@ -7842,7 +8039,34 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [c]) x ))
|
||||
// match: (Select0 (MULTU x (MOVWconst [-1])))
|
||||
// cond:
|
||||
// result: (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != -1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSCMOVZ)
|
||||
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type)
|
||||
v0.AuxInt = -1
|
||||
v0.AddArg(x)
|
||||
v.AddArg(v0)
|
||||
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
|
||||
v1.AuxInt = 0
|
||||
v.AddArg(v1)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [c]) x))
|
||||
// cond: isPowerOfTwo(int64(uint32(c)))
|
||||
// result: (SRLconst [32-log2(int64(uint32(c)))] x)
|
||||
for {
|
||||
|
|
@ -7864,7 +8088,29 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d])))
|
||||
// match: (Select0 (MULTU x (MOVWconst [c])))
|
||||
// cond: isPowerOfTwo(int64(uint32(c)))
|
||||
// result: (SRLconst [32-log2(int64(uint32(c)))] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0_1.AuxInt
|
||||
if !(isPowerOfTwo(int64(uint32(c)))) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSSRLconst)
|
||||
v.AuxInt = 32 - log2(int64(uint32(c)))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d])))
|
||||
// cond:
|
||||
// result: (MOVWconst [(c*d)>>32])
|
||||
for {
|
||||
|
|
@ -7886,7 +8132,29 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
|
|||
v.AuxInt = (c * d) >> 32
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d])))
|
||||
// match: (Select0 (MULTU (MOVWconst [d]) (MOVWconst [c])))
|
||||
// cond:
|
||||
// result: (MOVWconst [(c*d)>>32])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
d := v_0_0.AuxInt
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0_1.AuxInt
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = (c * d) >> 32
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d])))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int32(c)%int32(d))])
|
||||
for {
|
||||
|
|
@ -8003,7 +8271,33 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
|
|||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [0]) _ ))
|
||||
// match: (Select1 (MULTU (MOVWconst [c]) x))
|
||||
// cond: x.Op != OpMIPSMOVWconst
|
||||
// result: (Select1 (MULTU (MOVWconst [c]) x ))
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0_0.AuxInt
|
||||
x := v_0.Args[1]
|
||||
if !(x.Op != OpMIPSMOVWconst) {
|
||||
break
|
||||
}
|
||||
v.reset(OpSelect1)
|
||||
v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(types.UInt32, types.UInt32))
|
||||
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
|
||||
v1.AuxInt = c
|
||||
v0.AddArg(v1)
|
||||
v0.AddArg(x)
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [0]) _))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
|
|
@ -8022,7 +8316,26 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
|
|||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [1]) x ))
|
||||
// match: (Select1 (MULTU _ (MOVWconst [0])))
|
||||
// cond:
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != 0 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [1]) x))
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -8043,7 +8356,28 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [-1]) x ))
|
||||
// match: (Select1 (MULTU x (MOVWconst [1])))
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpCopy)
|
||||
v.Type = x.Type
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [-1]) x))
|
||||
// cond:
|
||||
// result: (NEG <x.Type> x)
|
||||
for {
|
||||
|
|
@ -8064,7 +8398,28 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [c]) x ))
|
||||
// match: (Select1 (MULTU x (MOVWconst [-1])))
|
||||
// cond:
|
||||
// result: (NEG <x.Type> x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != -1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSNEG)
|
||||
v.Type = x.Type
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [c]) x))
|
||||
// cond: isPowerOfTwo(int64(uint32(c)))
|
||||
// result: (SLLconst [log2(int64(uint32(c)))] x)
|
||||
for {
|
||||
|
|
@ -8086,7 +8441,29 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d])))
|
||||
// match: (Select1 (MULTU x (MOVWconst [c])))
|
||||
// cond: isPowerOfTwo(int64(uint32(c)))
|
||||
// result: (SLLconst [log2(int64(uint32(c)))] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0_1.AuxInt
|
||||
if !(isPowerOfTwo(int64(uint32(c)))) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPSSLLconst)
|
||||
v.AuxInt = log2(int64(uint32(c)))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d])))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
|
||||
for {
|
||||
|
|
@ -8108,7 +8485,29 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
|
|||
v.AuxInt = int64(int32(uint32(c) * uint32(d)))
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d])))
|
||||
// match: (Select1 (MULTU (MOVWconst [d]) (MOVWconst [c])))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPSMULTU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
d := v_0_0.AuxInt
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPSMOVWconst {
|
||||
break
|
||||
}
|
||||
c := v_0_1.AuxInt
|
||||
v.reset(OpMIPSMOVWconst)
|
||||
v.AuxInt = int64(int32(uint32(c) * uint32(d)))
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d])))
|
||||
// cond:
|
||||
// result: (MOVWconst [int64(int32(c)/int32(d))])
|
||||
for {
|
||||
|
|
@ -9182,7 +9581,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
|||
_ = no
|
||||
return true
|
||||
}
|
||||
// match: (EQ (MOVWconst [0]) yes no)
|
||||
// match: (EQ (MOVWconst [0]) yes no)
|
||||
// cond:
|
||||
// result: (First nil yes no)
|
||||
for {
|
||||
|
|
@ -9201,7 +9600,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
|||
_ = no
|
||||
return true
|
||||
}
|
||||
// match: (EQ (MOVWconst [c]) yes no)
|
||||
// match: (EQ (MOVWconst [c]) yes no)
|
||||
// cond: c != 0
|
||||
// result: (First nil no yes)
|
||||
for {
|
||||
|
|
@ -9653,7 +10052,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
|||
_ = no
|
||||
return true
|
||||
}
|
||||
// match: (NE (MOVWconst [0]) yes no)
|
||||
// match: (NE (MOVWconst [0]) yes no)
|
||||
// cond:
|
||||
// result: (First nil no yes)
|
||||
for {
|
||||
|
|
@ -9673,7 +10072,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
|||
_ = yes
|
||||
return true
|
||||
}
|
||||
// match: (NE (MOVWconst [c]) yes no)
|
||||
// match: (NE (MOVWconst [c]) yes no)
|
||||
// cond: c != 0
|
||||
// result: (First nil yes no)
|
||||
for {
|
||||
|
|
src/cmd/compile/internal/ssa/rewriteMIPS64.go
@ -2692,7 +2692,7 @@ func rewriteValueMIPS64_OpLsh16x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Lsh16x8 <t> x y)
|
||||
// match: (Lsh16x8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -2816,7 +2816,7 @@ func rewriteValueMIPS64_OpLsh32x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Lsh32x8 <t> x y)
|
||||
// match: (Lsh32x8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -2940,7 +2940,7 @@ func rewriteValueMIPS64_OpLsh64x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Lsh64x8 <t> x y)
|
||||
// match: (Lsh64x8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -3064,7 +3064,7 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Lsh8x8 <t> x y)
|
||||
// match: (Lsh8x8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -3092,24 +3092,6 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
|
|||
}
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
|
||||
// match: (ADDV (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (ADDVconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64ADDVconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADDV x (MOVVconst [c]))
|
||||
// cond: is32Bit(c)
|
||||
// result: (ADDVconst [c] x)
|
||||
|
|
@ -3128,6 +3110,24 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADDV (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (ADDVconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64ADDVconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADDV x (NEGV y))
|
||||
// cond:
|
||||
// result: (SUBV x y)
|
||||
|
|
@ -3179,7 +3179,7 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool {
|
|||
v.AddArg(ptr)
|
||||
return true
|
||||
}
|
||||
// match: (ADDVconst [0] x)
|
||||
// match: (ADDVconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -3247,24 +3247,6 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
|
||||
// match: (AND (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (ANDconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64ANDconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (AND x (MOVVconst [c]))
|
||||
// cond: is32Bit(c)
|
||||
// result: (ANDconst [c] x)
|
||||
|
|
@ -3283,6 +3265,24 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (AND (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (ANDconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64ANDconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (AND x x)
|
||||
// cond:
|
||||
// result: x
|
||||
|
|
@ -3299,7 +3299,7 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool {
|
||||
// match: (ANDconst [0] _)
|
||||
// match: (ANDconst [0] _)
|
||||
// cond:
|
||||
// result: (MOVVconst [0])
|
||||
for {
|
||||
|
|
@ -3446,7 +3446,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
|
||||
// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// cond: is32Bit(off1+off2)
|
||||
// result: (MOVBload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -3520,7 +3520,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg (MOVVconst [c]))
|
||||
// match: (MOVBreg (MOVVconst [c]))
|
||||
// cond:
|
||||
// result: (MOVVconst [int64(int8(c))])
|
||||
for {
|
||||
|
|
@ -3788,7 +3788,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
|
||||
// match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// cond: is32Bit(off1+off2)
|
||||
// result: (MOVDload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -3892,7 +3892,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
|
||||
// match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// cond: is32Bit(off1+off2)
|
||||
// result: (MOVFload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4110,7 +4110,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
|
||||
// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// cond: is32Bit(off1+off2)
|
||||
// result: (MOVHload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4232,7 +4232,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg (MOVVconst [c]))
|
||||
// match: (MOVHreg (MOVVconst [c]))
|
||||
// cond:
|
||||
// result: (MOVVconst [int64(int16(c))])
|
||||
for {
|
||||
|
|
@ -4458,7 +4458,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
|
||||
// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// cond: is32Bit(off1+off2)
|
||||
// result: (MOVVload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4520,7 +4520,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVVreg (MOVVconst [c]))
|
||||
// match: (MOVVreg (MOVVconst [c]))
|
||||
// cond:
|
||||
// result: (MOVVconst [c])
|
||||
for {
|
||||
|
|
@ -4800,7 +4800,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
|
||||
// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
|
||||
// cond: is32Bit(off1+off2)
|
||||
// result: (MOVWload [off1+off2] {sym} ptr mem)
|
||||
for {
|
||||
|
|
@ -4970,7 +4970,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg (MOVVconst [c]))
|
||||
// match: (MOVWreg (MOVVconst [c]))
|
||||
// cond:
|
||||
// result: (MOVVconst [int64(int32(c))])
|
||||
for {
|
||||
|
|
@ -5170,24 +5170,6 @@ func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
|
||||
// match: (NOR (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (NORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64NORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (NOR x (MOVVconst [c]))
|
||||
// cond: is32Bit(c)
|
||||
// result: (NORconst [c] x)
|
||||
|
|
@ -5206,6 +5188,24 @@ func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (NOR (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (NORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64NORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
|
||||
|
|
@ -5226,25 +5226,7 @@ func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
|
||||
// match: (OR (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (ORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64ORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (OR x (MOVVconst [c]))
|
||||
// match: (OR x (MOVVconst [c]))
|
||||
// cond: is32Bit(c)
|
||||
// result: (ORconst [c] x)
|
||||
for {
|
||||
|
|
@ -5262,7 +5244,25 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (OR x x)
|
||||
// match: (OR (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (ORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64ORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (OR x x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -5278,7 +5278,7 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
|
||||
// match: (ORconst [0] x)
|
||||
// match: (ORconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -5291,7 +5291,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ORconst [-1] _)
|
||||
// match: (ORconst [-1] _)
|
||||
// cond:
|
||||
// result: (MOVVconst [-1])
|
||||
for {
|
||||
|
|
@ -5338,7 +5338,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool {
|
||||
// match: (SGT (MOVVconst [c]) x)
|
||||
// match: (SGT (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (SGTconst [c] x)
|
||||
for {
|
||||
|
|
@ -5902,7 +5902,7 @@ func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool {
|
||||
// match: (SUBVconst [0] x)
|
||||
// match: (SUBVconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -5970,24 +5970,6 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
|
||||
// match: (XOR (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (XORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64XORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XOR x (MOVVconst [c]))
|
||||
// cond: is32Bit(c)
|
||||
// result: (XORconst [c] x)
|
||||
|
|
@ -6006,6 +5988,24 @@ func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XOR (MOVVconst [c]) x)
|
||||
// cond: is32Bit(c)
|
||||
// result: (XORconst [c] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0.AuxInt
|
||||
x := v.Args[1]
|
||||
if !(is32Bit(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64XORconst)
|
||||
v.AuxInt = c
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XOR x x)
|
||||
// cond:
|
||||
// result: (MOVVconst [0])
|
||||
|
|
@ -6021,7 +6021,7 @@ func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool {
|
||||
// match: (XORconst [0] x)
|
||||
// match: (XORconst [0] x)
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
|
|
@ -7352,7 +7352,7 @@ func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh16Ux8 <t> x y)
|
||||
// match: (Rsh16Ux8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -7484,7 +7484,7 @@ func rewriteValueMIPS64_OpRsh16x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh16x8 <t> x y)
|
||||
// match: (Rsh16x8 <t> x y)
|
||||
// cond:
|
||||
// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -7616,7 +7616,7 @@ func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh32Ux8 <t> x y)
|
||||
// match: (Rsh32Ux8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -7748,7 +7748,7 @@ func rewriteValueMIPS64_OpRsh32x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh32x8 <t> x y)
|
||||
// match: (Rsh32x8 <t> x y)
|
||||
// cond:
|
||||
// result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -7874,7 +7874,7 @@ func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh64Ux8 <t> x y)
|
||||
// match: (Rsh64Ux8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -7998,7 +7998,7 @@ func rewriteValueMIPS64_OpRsh64x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh64x8 <t> x y)
|
||||
// match: (Rsh64x8 <t> x y)
|
||||
// cond:
|
||||
// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -8128,7 +8128,7 @@ func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh8Ux8 <t> x y)
|
||||
// match: (Rsh8Ux8 <t> x y)
|
||||
// cond:
|
||||
// result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -8260,7 +8260,7 @@ func rewriteValueMIPS64_OpRsh8x8(v *Value) bool {
|
|||
_ = b
|
||||
types := &b.Func.Config.Types
|
||||
_ = types
|
||||
// match: (Rsh8x8 <t> x y)
|
||||
// match: (Rsh8x8 <t> x y)
|
||||
// cond:
|
||||
// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
|
||||
for {
|
||||
|
|
@ -8331,7 +8331,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d])))
|
||||
// match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d])))
|
||||
// cond:
|
||||
// result: (MOVVconst [int64(c)%int64(d)])
|
||||
for {
|
||||
|
|
@ -8398,6 +8398,26 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [-1]) x))
|
||||
// cond:
|
||||
// result: (NEGV x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
if v_0_0.AuxInt != -1 {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[1]
|
||||
v.reset(OpMIPS64NEGV)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU _ (MOVVconst [0])))
|
||||
// cond:
|
||||
// result: (MOVVconst [0])
|
||||
|
|
@ -8417,6 +8437,25 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [0]) _))
|
||||
// cond:
|
||||
// result: (MOVVconst [0])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
if v_0_0.AuxInt != 0 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64MOVVconst)
|
||||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU x (MOVVconst [1])))
|
||||
// cond:
|
||||
// result: x
|
||||
|
|
@ -8438,6 +8477,27 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [1]) x))
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
if v_0_0.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[1]
|
||||
v.reset(OpCopy)
|
||||
v.Type = x.Type
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU x (MOVVconst [c])))
|
||||
// cond: isPowerOfTwo(c)
|
||||
// result: (SLLVconst [log2(c)] x)
|
||||
|
|
@ -8460,6 +8520,28 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [c]) x))
|
||||
// cond: isPowerOfTwo(c)
|
||||
// result: (SLLVconst [log2(c)] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0_0.AuxInt
|
||||
x := v_0.Args[1]
|
||||
if !(isPowerOfTwo(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64SLLVconst)
|
||||
v.AuxInt = log2(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [-1]) x))
|
||||
// cond:
|
||||
// result: (NEGV x)
|
||||
|
|
@ -8480,6 +8562,26 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU x (MOVVconst [-1])))
|
||||
// cond:
|
||||
// result: (NEGV x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != -1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64NEGV)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [0]) _))
|
||||
// cond:
|
||||
// result: (MOVVconst [0])
|
||||
|
|
@ -8499,6 +8601,25 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU _ (MOVVconst [0])))
|
||||
// cond:
|
||||
// result: (MOVVconst [0])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != 0 {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64MOVVconst)
|
||||
v.AuxInt = 0
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [1]) x))
|
||||
// cond:
|
||||
// result: x
|
||||
|
|
@ -8520,6 +8641,27 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU x (MOVVconst [1])))
|
||||
// cond:
|
||||
// result: x
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
if v_0_1.AuxInt != 1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpCopy)
|
||||
v.Type = x.Type
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU (MOVVconst [c]) x))
|
||||
// cond: isPowerOfTwo(c)
|
||||
// result: (SLLVconst [log2(c)] x)
|
||||
|
|
@ -8542,6 +8684,28 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (MULVU x (MOVVconst [c])))
|
||||
// cond: isPowerOfTwo(c)
|
||||
// result: (SLLVconst [log2(c)] x)
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0_1.AuxInt
|
||||
if !(isPowerOfTwo(c)) {
|
||||
break
|
||||
}
|
||||
v.reset(OpMIPS64SLLVconst)
|
||||
v.AuxInt = log2(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (DIVVU x (MOVVconst [1])))
|
||||
// cond:
|
||||
// result: x
|
||||
|
|
@ -8607,7 +8771,29 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
|
|||
v.AuxInt = c * d
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d])))
|
||||
// match: (Select1 (MULVU (MOVVconst [d]) (MOVVconst [c])))
|
||||
// cond:
|
||||
// result: (MOVVconst [c*d])
|
||||
for {
|
||||
v_0 := v.Args[0]
|
||||
if v_0.Op != OpMIPS64MULVU {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
d := v_0_0.AuxInt
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpMIPS64MOVVconst {
|
||||
break
|
||||
}
|
||||
c := v_0_1.AuxInt
|
||||
v.reset(OpMIPS64MOVVconst)
|
||||
v.AuxInt = c * d
|
||||
return true
|
||||
}
|
||||
// match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d])))
|
||||
// cond:
|
||||
// result: (MOVVconst [int64(c)/int64(d)])
|
||||
for {
|
||||
|
|
@ -9834,7 +10020,7 @@ func rewriteBlockMIPS64(b *Block) bool {
|
|||
_ = no
|
||||
return true
|
||||
}
|
||||
// match: (EQ (MOVVconst [0]) yes no)
|
||||
// match: (EQ (MOVVconst [0]) yes no)
|
||||
// cond:
|
||||
// result: (First nil yes no)
|
||||
for {
|
||||
|
|
@ -9853,7 +10039,7 @@ func rewriteBlockMIPS64(b *Block) bool {
|
|||
_ = no
|
||||
return true
|
||||
}
|
||||
// match: (EQ (MOVVconst [c]) yes no)
|
||||
// match: (EQ (MOVVconst [c]) yes no)
|
||||
// cond: c != 0
|
||||
// result: (First nil no yes)
|
||||
for {
|
||||
|
|
@ -10273,7 +10459,7 @@ func rewriteBlockMIPS64(b *Block) bool {
|
|||
_ = no
|
||||
return true
|
||||
}
|
||||
// match: (NE (MOVVconst [0]) yes no)
|
||||
// match: (NE (MOVVconst [0]) yes no)
|
||||
// cond:
|
||||
// result: (First nil no yes)
|
||||
for {
|
||||
|
|
@ -10293,7 +10479,7 @@ func rewriteBlockMIPS64(b *Block) bool {
|
|||
_ = yes
|
||||
return true
|
||||
}
|
||||
// match: (NE (MOVVconst [c]) yes no)
|
||||
// match: (NE (MOVVconst [c]) yes no)
|
||||
// cond: c != 0
|
||||
// result: (First nil yes no)
|
||||
for {
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
src/cmd/compile/internal/ssa/rewritedec.go
@ -34,7 +34,7 @@ func rewriteValuedec(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValuedec_OpComplexImag(v *Value) bool {
|
||||
// match: (ComplexImag (ComplexMake _ imag ))
|
||||
// match: (ComplexImag (ComplexMake _ imag))
|
||||
// cond:
|
||||
// result: imag
|
||||
for {
|
||||
|
|
@ -51,7 +51,7 @@ func rewriteValuedec_OpComplexImag(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValuedec_OpComplexReal(v *Value) bool {
|
||||
// match: (ComplexReal (ComplexMake real _ ))
|
||||
// match: (ComplexReal (ComplexMake real _))
|
||||
// cond:
|
||||
// result: real
|
||||
for {
|
||||
|
|
@ -274,7 +274,7 @@ func rewriteValuedec_OpSliceLen(v *Value) bool {
|
|||
return false
|
||||
}
|
||||
func rewriteValuedec_OpSlicePtr(v *Value) bool {
|
||||
// match: (SlicePtr (SliceMake ptr _ _ ))
|
||||
// match: (SlicePtr (SliceMake ptr _ _))
|
||||
// cond:
|
||||
// result: ptr
|
||||
for {
|
||||
|
|
File diff suppressed because it is too large