cmd/compile: splitload (CMPconst [0] x) into (TEST x x) on amd64

Provides a minor but widespread benefit to generated code.

Removes one source of random fluctuation when changing
other aspects of the compiler.
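
A sketch of the effect (illustrative example, not part of the original
commit message): when the splitload pass breaks a fused compare-with-load
apart and the constant is zero, the compare half is now materialized as
TEST x x rather than CMPconst [0] x. For a zero comparison the two set
the relevant flags identically, and the TEST form typically has a
slightly shorter encoding since it needs no immediate:

	// amd64 sketch, before:      after:
	//   MOVQ (AX), AX              MOVQ (AX), AX
	//   CMPQ AX, $0                TESTQ AX, AX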

Change-Id: I16db6f5e240a97d27f05dc1ba5b8b729af3adb12
Reviewed-on: https://go-review.googlesource.com/c/go/+/229702
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Josh Bleecher Snyder 2020-04-23 12:00:14 -07:00
parent d9d88dd27f
commit e0915dea09
2 changed files with 289 additions and 11 deletions

src/cmd/compile/internal/ssa/gen/AMD64splitload.rules

@@ -18,21 +18,28 @@
(CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x)
(CMPQconstload {sym} [vo] ptr mem) => (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off32()] ptr mem) x)
(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
(CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x)
(CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
(CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
(CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
(CMPQconstloadidx1 {sym} [vo] ptr idx mem) => (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPLconstloadidx1 {sym} [vo] ptr idx mem) => (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPWconstloadidx1 {sym} [vo] ptr idx mem) => (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
(CMPBconstloadidx1 {sym} [vo] ptr idx mem) => (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off32()] ptr idx mem) x)
(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x)
(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x)
(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x)
(CMPQconstloadidx8 {sym} [vo] ptr idx mem) => (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPLconstloadidx4 {sym} [vo] ptr idx mem) => (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPWconstloadidx2 {sym} [vo] ptr idx mem) => (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
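
In the rules above, vo.Val() selects between the TEST and CMPconst
forms, and the x: binding names the load so it can appear as both
operands of the TEST (the generated code below realizes this as
v.AddArg2(x, x)). A minimal sketch of the assumed ValAndOff encoding (a
simplified mirror of cmd/compile/internal/ssa, not the real
implementation):

	package main

	import "fmt"

	// ValAndOff packs a comparison constant and a load offset into one
	// int64 AuxInt: value in the high 32 bits, offset in the low 32 bits.
	type ValAndOff int64

	func makeValAndOff(val, off int32) ValAndOff {
		return ValAndOff(int64(val)<<32 | int64(uint32(off)))
	}

	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
	func (x ValAndOff) Off() int64 { return int64(int32(x)) }

	func main() {
		vo := makeValAndOff(0, 24) // compare against 0, load at offset 24
		fmt.Println(vo.Val() == 0, vo.Off()) // true 24: the TEST rule fires
	}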

src/cmd/compile/internal/ssa/rewriteAMD64splitload.go

@@ -58,12 +58,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPBconstload {sym} [vo] ptr mem)
	// cond: vo.Val() == 0
	// result: (TESTB x:(MOVBload {sym} [vo.Off32()] ptr mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTB)
		x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg2(ptr, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconstload {sym} [vo] ptr mem)
	// cond: vo.Val() != 0
	// result: (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(vo.Val8())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
@@ -73,6 +96,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
	v_2 := v.Args[2]
@@ -81,6 +105,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTB)
		x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -88,6 +133,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(vo.Val8())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
@@ -97,6 +145,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
	v_2 := v.Args[2]
@@ -152,12 +201,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPLconstload {sym} [vo] ptr mem)
	// cond: vo.Val() == 0
	// result: (TESTL x:(MOVLload {sym} [vo.Off32()] ptr mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTL)
		x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg2(ptr, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconstload {sym} [vo] ptr mem)
	// cond: vo.Val() != 0
	// result: (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(vo.Val32())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
@@ -167,6 +239,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
	v_2 := v.Args[2]
@@ -175,6 +248,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTL)
		x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -182,6 +276,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(vo.Val32())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
@@ -191,6 +288,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
	v_2 := v.Args[2]
@@ -199,6 +297,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTL)
		x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -206,6 +325,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(vo.Val32())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
@@ -215,6 +337,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
	v_2 := v.Args[2]
@@ -295,12 +418,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPQconstload {sym} [vo] ptr mem)
	// cond: vo.Val() == 0
	// result: (TESTQ x:(MOVQload {sym} [vo.Off32()] ptr mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTQ)
		x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg2(ptr, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconstload {sym} [vo] ptr mem)
	// cond: vo.Val() != 0
	// result: (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(vo.Val32())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
@@ -310,6 +456,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
	v_2 := v.Args[2]
@@ -318,6 +465,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTQ)
		x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -325,6 +493,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(vo.Val32())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
@@ -334,6 +505,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
	v_2 := v.Args[2]
@@ -342,6 +514,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTQ)
		x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -349,6 +542,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(vo.Val32())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
@@ -358,6 +554,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
@@ -438,12 +635,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPWconstload {sym} [vo] ptr mem)
	// cond: vo.Val() == 0
	// result: (TESTW x:(MOVWload {sym} [vo.Off32()] ptr mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTW)
		x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg2(ptr, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconstload {sym} [vo] ptr mem)
	// cond: vo.Val() != 0
	// result: (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		mem := v_1
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(vo.Val16())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
@@ -453,6 +673,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
	v_2 := v.Args[2]
@@ -461,6 +682,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTW)
		x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -468,6 +710,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(vo.Val16())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
@@ -477,6 +722,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
	v_2 := v.Args[2]
@@ -485,6 +731,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
	// cond: vo.Val() == 0
	// result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x)
	for {
		vo := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() == 0) {
			break
		}
		v.reset(OpAMD64TESTW)
		x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
		x.AuxInt = int32ToAuxInt(vo.Off32())
		x.Aux = symToAux(sym)
		x.AddArg3(ptr, idx, mem)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
	// cond: vo.Val() != 0
	// result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
	for {
		vo := auxIntToValAndOff(v.AuxInt)
@@ -492,6 +759,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
		ptr := v_0
		idx := v_1
		mem := v_2
		if !(vo.Val() != 0) {
			break
		}
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(vo.Val16())
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
@@ -501,6 +771,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
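
These rewrites run only when the compiler must split a fused
compare-with-load back into its parts, for instance when the flags
result has to be recomputed, so ordinary zero tests of loaded values
are where the new TEST form can surface. A hypothetical Go-level
example (not a test from this CL):

	// anyNonZero's s[i] != 0 can be fused into a compare-with-load
	// against the constant 0; if that op is later split, the compare
	// half now becomes TESTQ reg, reg instead of CMPQ reg, $0.
	func anyNonZero(s []int64) bool {
		for i := range s {
			if s[i] != 0 {
				return true
			}
		}
		return false
	}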