mirror of https://github.com/golang/go.git
cmd/compile/internal/ssa: reintroduce ANDconst opcode on PPC64

This allows more effective conversion of rotate and mask opcodes into
their CC equivalents, while simplifying the first lowering pass. The
ANDconst opcode was removed before the latelower pass was introduced,
in order to fold more cases of compare against zero. Reintroducing
ANDconst pushes its conversion to ANDCCconst into latelower, alongside
the other CC opcodes.

This also requires introducing RLDICLCC to prevent regressions when
ANDconst is converted to RLDICL, then to RLDICLCC, and back to
ANDCCconst where possible.

Change-Id: I9e5f9c99fbefa334db18c6c152c5f967f3ff2590
Reviewed-on: https://go-review.googlesource.com/c/go/+/586160
Reviewed-by: Lynn Boger <laboger@linux.vnet.ibm.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Carlos Amedee <carlos@golang.org>
commit dca577d882
parent 6861b2eff5
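For orientation, a minimal sketch (not from this CL; the function name and constant are illustrative) of the kind of Go source this change affects:

// Hypothetical example: m = 0xF0F0 fits in 16 bits, so x&m lowers to the
// reintroduced ANDconst on ppc64. Because the result feeds a compare
// against zero, the latelower pass can convert ANDconst into ANDCCconst
// and branch on the CC result instead of emitting a separate compare.
func maskedIsZero(x uint64) bool {
	return x&0xF0F0 == 0
}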
src/cmd/compile/internal/ppc64/ssa.go

@@ -629,18 +629,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})

 	// Auxint holds mask
-	case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR:
+	case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICLCC, ssa.OpPPC64RLDICR:
 		sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
 		p := s.Prog(v.Op.Asm())
 		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
 		switch v.Op {
-		case ssa.OpPPC64RLDICL:
+		case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICLCC:
 			p.AddRestSourceConst(mb)
 		case ssa.OpPPC64RLDICR:
 			p.AddRestSourceConst(me)
 		}
 		p.Reg = v.Args[0].Reg()
-		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.ResultReg()}

 	case ssa.OpPPC64RLWNM:
 		_, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
@@ -691,7 +691,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {

 	case ssa.OpPPC64ADDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
 		ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst,
-		ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst:
+		ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst,
+		ssa.OpPPC64ANDconst:
 		p := s.Prog(v.Op.Asm())
 		p.Reg = v.Args[0].Reg()
 		p.From.Type = obj.TYPE_CONST
src/cmd/compile/internal/ssa/_gen/PPC64.rules

@@ -137,22 +137,22 @@
 (ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])

 // Combine rotate and mask operations
-(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
 (AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
-(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
 (AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)

 // Note, any rotated word bitmask is still a valid word bitmask.
 (ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
-(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)

-(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
-(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
 (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
 (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)

-(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
-(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
 (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
 (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)

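A hedged sketch (not from this CL; names and constants are illustrative) of the rotate-and-mask source pattern the (ANDconst (ROTLWconst ...)) rules above fold into a single rlwinm:

package main

import "math/bits"

// Illustrative only: a 32-bit rotate followed by a contiguous-bit mask,
// which the rules above combine into one RLWINM instruction on ppc64.
func rotateThenMask(x uint32) uint32 {
	return bits.RotateLeft32(x, 8) & 0x00FFFF00
}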
@@ -201,38 +201,38 @@
 ((Rsh64U|Lsh64)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
 ((Rsh64U|Lsh64)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
-((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
-((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
+((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
 (Rsh64x(64|32) <t> x y) => (ISEL [0] (SRAD <t> x y) (SRADconst <t> x [63]) (CMP(U|WU)const y [64]))
-(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
-(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
+(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))

 ((Rsh32U|Lsh32)x64 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPUconst y [32]))
 ((Rsh32U|Lsh32)x32 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPWUconst y [32]))
-((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
-((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
+((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
 (Rsh32x(64|32) <t> x y) => (ISEL [0] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMP(U|WU)const y [32]))
-(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
-(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
+(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))

 ((Rsh16U|Lsh16)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16]))
 ((Rsh16U|Lsh16)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16]))
-((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
-((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
+((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
 (Rsh16x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMP(U|WU)const y [16]))
-(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
-(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
+(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))

 ((Rsh8U|Lsh8)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8]))
 ((Rsh8U|Lsh8)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8]))
-((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
-((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
+((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
 (Rsh8x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMP(U|WU)const y [8]))
-(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
-(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
+(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))

 // Catch bounded shifts in situations like foo<<uint(shift&63) which might not be caught by the prove pass.
-(CMP(U|WU)const [d] (Select0 (ANDCCconst z [c]))) && uint64(d) > uint64(c) => (FlagLT)
+(CMP(U|WU)const [d] (ANDconst z [c])) && uint64(d) > uint64(c) => (FlagLT)

 (ORN x (MOVDconst [-1])) => x

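To see why the x8/x16 shift variants test masks like 0x00C0: for a shift count held in a narrow type, only the count's high bits decide whether the amount is out of range. A hedged sketch (names are illustrative, not from the CL):

// Illustrative only: y is a uint8, so y >= 64 exactly when y&0xC0 != 0.
// The rules above apply ANDconst [0x00C0] to the count, compare the
// result with zero, and use ISEL to produce 0 for oversized shifts.
func shiftRight(x uint64, y uint8) uint64 {
	return x >> y
}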
@@ -282,7 +282,7 @@
 (OR x (NOR y y)) => (ORN x y)

 // Lowering comparisons
-(EqB x y) => (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+(EqB x y) => (ANDconst [1] (EQV x y))
 // Sign extension dependence on operand sign sets up for sign/zero-extension elision later
 (Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
 (Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
@@ -322,11 +322,11 @@
 (If (FGreaterThan cc) yes no) => (FGT cc yes no)
 (If (FGreaterEqual cc) yes no) => (FGE cc yes no)

-(If cond yes no) => (NE (Select1 <types.TypeFlags> (ANDCCconst [1] cond)) yes no)
+(If cond yes no) => (NE (CMPconst [0] (ANDconst [1] cond)) yes no)

 // Absorb boolean tests into block
-(NE (Select1 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
-(NE (Select1 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
+(NE (CMPconst [0] (ANDconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
+(NE (CMPconst [0] (ANDconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)

 // absorb flag constants into branches
 (EQ (FlagEQ) yes no) => (First yes no)
@@ -408,8 +408,6 @@


 // Elide compares of bit tests
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
@@ -417,9 +415,9 @@
 (CondSelect x y (SETBC [a] cmp)) => (ISEL [a] x y cmp)
 (CondSelect x y (SETBCR [a] cmp)) => (ISEL [a+4] x y cmp)
 // Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
 // Fold any CR -> GPR -> CR transfers when applying the above rule.
-(ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
+(ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
 (ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp)
 (ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp)

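A sketch of the source shape behind these CondSelect rules (assumed for illustration; whether a given function lowers this way depends on the compiler's heuristics):

// Illustrative only: a simple two-way select may become CondSelect and
// then ISEL on ppc64. When the condition is a materialized bool, the
// rules above re-derive the CR bit with CMPconst [0] (ANDconst [1] ...),
// then fold away any SETBC/SETBCR round trip through a GPR.
func pick(c bool, a, b int64) int64 {
	if c {
		return a
	}
	return b
}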
@@ -563,45 +561,44 @@
 // Discover consts
 (AND x (MOVDconst [-1])) => x
-(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
+(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
 (XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
 (OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)

 // Simplify consts
-(ANDCCconst [c] (Select0 (ANDCCconst [d] x))) => (ANDCCconst [c&d] x)
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
 (ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
 (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
-(Select0 (ANDCCconst [-1] x)) => x
-(Select0 (ANDCCconst [0] _)) => (MOVDconst [0])
-(Select1 (ANDCCconst [0] _)) => (FlagEQ)
+(ANDconst [-1] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
 (XORconst [0] x) => x
 (ORconst [-1] _) => (MOVDconst [-1])
 (ORconst [0] x) => x

 // zero-extend of small and => small and
-(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y
-(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y
-(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y
+(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
 (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y

 // sign extend of small-positive and => small-positive-and
-(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y
-(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y
-(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
+(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
+(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
 (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y

 // small and of zero-extend => either zero-extend or small and
-(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
-(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x)
-(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y
-(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x)
+(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
+(ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x)
+(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
+(ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x)

 (AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
 (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
 // normal case
-(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x))
-(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
-(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
+(ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x)

 // Eliminate unnecessary sign/zero extend following right shift
 (MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))

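A hedged sketch (illustrative names and constants) of the extension elision the "zero-extend of small and" rules perform:

// Illustrative only: the mask already bounds the value to 7 bits, so the
// (MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF rule lets the later
// byte zero-extension disappear entirely.
func maskThenNarrow(x uint64) uint8 {
	return uint8(x & 0x7F)
}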
@@ -650,10 +647,10 @@
 (MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
 (MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
 (MOVWZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFFFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFFFFFF,r)] y)
-(Select0 (ANDCCconst [m] (RLWINM [r] y))) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+(ANDconst [m] (RLWINM [r] y)) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
 (SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
 (RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
-(RLWINM [r] (Select0 (ANDCCconst [a] u))) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
+(RLWINM [r] (ANDconst [a] u)) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
 // SLWconst is a special case of RLWNM which always zero-extends the result.
 (SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w)
 (MOVWZreg w:(SLWconst u)) => w
@@ -682,10 +679,10 @@
 (MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
 (MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))

-(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z
+(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
 (MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
-(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z
-(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z
+(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
+(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z

 // Arithmetic constant ops

@@ -818,7 +815,7 @@
 (AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...)

 (Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
-(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x)
+(ANDconst [1] z:(SRADconst [63] x)) && z.Uses == 1 => (SRDconst [63] x)

 // Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
 // This may interact with other patterns in the future. (Compare with arm64)
@@ -854,11 +851,11 @@
 (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
 (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)

-(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
 (SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
 (SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
 (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
-(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
 (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
 // special case for power9
 (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
@@ -894,8 +891,9 @@
 // Canonicalize the order of arguments to comparisons - helps with CSE.
 ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))

-// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value. Use the flag result of ANDCCconst.
-((CMP|CMPW|CMPU|CMPWU)const [0] (Select0 a:(ANDCCconst [n] z))) => (Select1 <types.TypeFlags> a)
+// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value.
+// Rewrite to a cmp int64(0) to lower into ANDCCconst in the latelower pass.
+(CMP(W|U|WU)const [0] a:(ANDconst [n] z)) => (CMPconst [0] a)

 // SETBC auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 1 : 0
 // SETBCR auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 0 : 1
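A hedged sketch (illustrative names and mask) of the reasoning in the comment above:

// Illustrative only: n = 0xFFF is a zero-extended 16-bit immediate, so
// z&n can never be negative, and signed and unsigned compares with zero
// behave identically; the rule above canonicalizes all four compare
// widths to CMPconst [0] so latelower can fold them into ANDCCconst.
func lowBitsClear(z uint32) bool {
	return z&0xFFF == 0
}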
@@ -962,8 +960,8 @@
 (XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp)
 (XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp)

-(SETBC [2] (Select1 a:(ANDCCconst <t> [1] _))) => (XORconst [1] (Select0 <t.FieldType(0)> a))
-(SETBCR [2] (Select1 a:(ANDCCconst [1] _))) => (Select0 a)
+(SETBC [2] (CMPconst [0] a:(ANDconst [1] _))) => (XORconst [1] a)
+(SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) => a

 // Only CMPconst for these in case AND|OR|XOR result is > 32 bits
 (SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
@@ -976,7 +974,7 @@
 (SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))

 // A particular pattern seen in cgo code:
-(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x))
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)

 // floating point negative abs
 (FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
src/cmd/compile/internal/ssa/_gen/PPC64Ops.go

@@ -248,11 +248,12 @@ func init() {
 		{name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
 		{name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},

-		{name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"},                      // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
-		{name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"},                       // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
-		{name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
-		{name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"},                     // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
-		{name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"},                     // Likewise, but only ME and SH are valid. MB is always 0.
+		{name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"},                           // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
+		{name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"},                            // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
+		{name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true},      // "rlwimi" similar aux encoding as above
+		{name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"},                          // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
+		{name: "RLDICLCC", argLength: 1, reg: gp11, asm: "RLDICLCC", aux: "Int64", typ: "(Int, Flags)"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. Sets CC.
+		{name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"},                          // Likewise, but only ME and SH are valid. MB is always 0.

 		{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"},                          // count leading zeros
 		{name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC
@@ -323,9 +324,10 @@ func init() {
 		{name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"},   // -abs(arg0), float64
 		{name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64

-		{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"},   // arg0|aux
-		{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
-		{name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+		{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"},   // arg0|aux
+		{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+		{name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"},           // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+		{name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, clobberFlags: true, asm: "ANDCC", aux: "Int64", typ: "Int"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.

 		{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"},   // sign extend int8 to int64
 		{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules

@@ -18,11 +18,8 @@
 (SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp)
 (SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp)

-// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst
-// always sets it.
-(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
 // The upper bits of the smaller than register values is undefined. Take advantage of that.
-(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n))
+(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (ANDconst [int64(int16(m))] n)

 // Convert simple bit masks to an equivalent rldic[lr] if possible.
 (AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n)
@@ -47,9 +44,17 @@
 // Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if
 // both ops are in the same block.
 (CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
-(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:((NEG|CNTLZD|RLDICL) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
 // Note: ADDCCconst only assembles to 1 instruction for int16 constants.
 (CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:(ANDconst [c] x)) && int64(uint16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
 // And finally, fixup the flag user.
 (CMPconst <t> [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 <t> z)
-(CMPconst <t> [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 <t> z)
+(CMPconst <t> [0] (Select0 z:((ADDCCconst|ANDCCconst|NEGCC|CNTLZDCC|RLDICLCC) y))) => (Select1 <t> z)
+
+// After trying to convert ANDconst to ANDCCconst above, if the CC result is not needed, try to avoid using
+// ANDconst which clobbers CC.
+(ANDconst [m] x) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+
+// Likewise, trying converting RLDICLCC back to ANDCCconst as it is faster.
+(RLDICLCC [a] x) && convertPPC64RldiclAndccconst(a) != 0 => (ANDCCconst [convertPPC64RldiclAndccconst(a)] x)
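A worked sketch of the round trip these rules describe, expressed with the CL's own helpers inside package ssa (the mask value is assumed for illustration):

// Illustrative only: ANDconst [0x00FF] first becomes
// RLDICL [encodePPC64RotateMask(0, 0x00FF, 64)]. If the CC result is
// later needed, RLDICL is promoted to RLDICLCC, and because the rotate
// is 0 and the mask fits in 16 bits, convertPPC64RldiclAndccconst
// recovers 0x00FF, turning the op into ANDCCconst [0x00FF] again.
func rldiclRoundTrips(mask int64) bool {
	aux := encodePPC64RotateMask(0, mask, 64)
	return convertPPC64RldiclAndccconst(aux) == mask
}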
src/cmd/compile/internal/ssa/opGen.go

@@ -2165,6 +2165,7 @@ const (
 	OpPPC64RLWNM
 	OpPPC64RLWMI
 	OpPPC64RLDICL
+	OpPPC64RLDICLCC
 	OpPPC64RLDICR
 	OpPPC64CNTLZD
 	OpPPC64CNTLZDCC
@@ -2221,6 +2222,7 @@ const (
 	OpPPC64ORconst
 	OpPPC64XORconst
 	OpPPC64ANDCCconst
+	OpPPC64ANDconst
 	OpPPC64MOVBreg
 	OpPPC64MOVBZreg
 	OpPPC64MOVHreg
@@ -29121,6 +29123,20 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:    "RLDICLCC",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     ppc64.ARLDICLCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
 	{
 		name:    "RLDICR",
 		auxType: auxInt64,
@@ -29885,6 +29901,21 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:         "ANDconst",
+		auxType:      auxInt64,
+		argLen:       1,
+		clobberFlags: true,
+		asm:          ppc64.AANDCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
 	{
 		name:   "MOVBreg",
 		argLen: 1,
src/cmd/compile/internal/ssa/rewrite.go

@@ -1745,9 +1745,11 @@ func convertPPC64OpToOpCC(op *Value) *Value {
 		OpPPC64ADD:      OpPPC64ADDCC,
 		OpPPC64ADDconst: OpPPC64ADDCCconst,
 		OpPPC64AND:      OpPPC64ANDCC,
+		OpPPC64ANDconst: OpPPC64ANDCCconst,
 		OpPPC64ANDN:     OpPPC64ANDNCC,
 		OpPPC64CNTLZD:   OpPPC64CNTLZDCC,
 		OpPPC64OR:       OpPPC64ORCC,
+		OpPPC64RLDICL:   OpPPC64RLDICLCC,
 		OpPPC64SUB:      OpPPC64SUBCC,
 		OpPPC64NEG:      OpPPC64NEGCC,
 		OpPPC64NOR:      OpPPC64NORCC,
@@ -1761,6 +1763,15 @@ func convertPPC64OpToOpCC(op *Value) *Value {
 	return op
 }

+// Try converting a RLDICL to ANDCC. If successful, return the mask otherwise 0.
+func convertPPC64RldiclAndccconst(sauxint int64) int64 {
+	r, _, _, mask := DecodePPC64RotateMask(sauxint)
+	if r != 0 || mask&0xFFFF != mask {
+		return 0
+	}
+	return int64(mask)
+}
+
 // Convenience function to rotate a 32 bit constant value by another constant.
 func rotateLeft32(v, rotate int64) int64 {
 	return int64(bits.RotateLeft32(uint32(v), int(rotate)))

src/cmd/compile/internal/ssa/rewritePPC64.go
(File diff suppressed because it is too large)
src/cmd/compile/internal/ssa/rewritePPC64latelower.go

@@ -3,7 +3,6 @@
 package ssa

 import "internal/buildcfg"
-import "cmd/compile/internal/types"

 func rewriteValuePPC64latelower(v *Value) bool {
 	switch v.Op {
@@ -11,18 +10,20 @@ func rewriteValuePPC64latelower(v *Value) bool {
 		return rewriteValuePPC64latelower_OpPPC64ADD(v)
 	case OpPPC64AND:
 		return rewriteValuePPC64latelower_OpPPC64AND(v)
+	case OpPPC64ANDconst:
+		return rewriteValuePPC64latelower_OpPPC64ANDconst(v)
 	case OpPPC64CMPconst:
 		return rewriteValuePPC64latelower_OpPPC64CMPconst(v)
 	case OpPPC64ISEL:
 		return rewriteValuePPC64latelower_OpPPC64ISEL(v)
 	case OpPPC64RLDICL:
 		return rewriteValuePPC64latelower_OpPPC64RLDICL(v)
+	case OpPPC64RLDICLCC:
+		return rewriteValuePPC64latelower_OpPPC64RLDICLCC(v)
 	case OpPPC64SETBC:
 		return rewriteValuePPC64latelower_OpPPC64SETBC(v)
 	case OpPPC64SETBCR:
 		return rewriteValuePPC64latelower_OpPPC64SETBCR(v)
-	case OpSelect0:
-		return rewriteValuePPC64latelower_OpSelect0(v)
 	}
 	return false
 }
@@ -54,11 +55,9 @@ func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool {
 func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
 	// match: (AND <t> x:(MOVDconst [m]) n)
 	// cond: t.Size() <= 2
-	// result: (Select0 (ANDCCconst [int64(int16(m))] n))
+	// result: (ANDconst [int64(int16(m))] n)
 	for {
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -71,11 +70,9 @@ func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
 			if !(t.Size() <= 2) {
 				continue
 			}
-			v.reset(OpSelect0)
-			v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v0.AuxInt = int64ToAuxInt(int64(int16(m)))
-			v0.AddArg(n)
-			v.AddArg(v0)
+			v.reset(OpPPC64ANDconst)
+			v.AuxInt = int64ToAuxInt(int64(int16(m)))
+			v.AddArg(n)
 			return true
 		}
 		break
@@ -146,6 +143,24 @@ func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
 	}
 	return false
 }
+func rewriteValuePPC64latelower_OpPPC64ANDconst(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (ANDconst [m] x)
+	// cond: isPPC64ValidShiftMask(m)
+	// result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+	for {
+		m := auxIntToInt64(v.AuxInt)
+		x := v_0
+		if !(isPPC64ValidShiftMask(m)) {
+			break
+		}
+		v.reset(OpPPC64RLDICL)
+		v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (CMPconst [0] z:(ADD x y))
@@ -319,6 +334,25 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
 		v.AddArg(convertPPC64OpToOpCC(z))
 		return true
 	}
+	// match: (CMPconst [0] z:(RLDICL x))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64RLDICL {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
 	// match: (CMPconst [0] z:(ADDconst [c] x))
 	// cond: int64(int16(c)) == c && v.Block == z.Block
 	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
@@ -339,6 +373,26 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
 		v.AddArg(convertPPC64OpToOpCC(z))
 		return true
 	}
+	// match: (CMPconst [0] z:(ANDconst [c] x))
+	// cond: int64(uint16(c)) == c && v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64ANDconst {
+			break
+		}
+		c := auxIntToInt64(z.AuxInt)
+		if !(int64(uint16(c)) == c && v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
 	// match: (CMPconst <t> [0] (Select0 z:(ADDCC x y)))
 	// result: (Select1 <t> z)
 	for {
@@ -467,6 +521,22 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
 		v.AddArg(z)
 		return true
 	}
+	// match: (CMPconst <t> [0] (Select0 z:(ANDCCconst y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64ANDCCconst {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
 	// match: (CMPconst <t> [0] (Select0 z:(NEGCC y)))
 	// result: (Select1 <t> z)
 	for {
@@ -499,6 +569,22 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
 		v.AddArg(z)
 		return true
 	}
+	// match: (CMPconst <t> [0] (Select0 z:(RLDICLCC y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64RLDICLCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
 	return false
 }
 func rewriteValuePPC64latelower_OpPPC64ISEL(v *Value) bool {
@@ -558,6 +644,24 @@ func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool {
 	}
 	return false
 }
+func rewriteValuePPC64latelower_OpPPC64RLDICLCC(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (RLDICLCC [a] x)
+	// cond: convertPPC64RldiclAndccconst(a) != 0
+	// result: (ANDCCconst [convertPPC64RldiclAndccconst(a)] x)
+	for {
+		a := auxIntToInt64(v.AuxInt)
+		x := v_0
+		if !(convertPPC64RldiclAndccconst(a) != 0) {
+			break
+		}
+		v.reset(OpPPC64ANDCCconst)
+		v.AuxInt = int64ToAuxInt(convertPPC64RldiclAndccconst(a))
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
@@ -678,28 +782,6 @@ func rewriteValuePPC64latelower_OpPPC64SETBCR(v *Value) bool {
 	}
 	return false
 }
-func rewriteValuePPC64latelower_OpSelect0(v *Value) bool {
-	v_0 := v.Args[0]
-	// match: (Select0 z:(ANDCCconst [m] x))
-	// cond: z.Uses == 1 && isPPC64ValidShiftMask(m)
-	// result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
-	for {
-		z := v_0
-		if z.Op != OpPPC64ANDCCconst {
-			break
-		}
-		m := auxIntToInt64(z.AuxInt)
-		x := z.Args[0]
-		if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) {
-			break
-		}
-		v.reset(OpPPC64RLDICL)
-		v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
 func rewriteBlockPPC64latelower(b *Block) bool {
 	return false
 }