diff --git a/doc/articles/race_detector.html b/doc/articles/race_detector.html index 014411d948..63a658f870 100644 --- a/doc/articles/race_detector.html +++ b/doc/articles/race_detector.html @@ -379,6 +379,38 @@ func (w *Watchdog) Start() { } +
+As this example demonstrates, unsynchronized send and close operations +on the same channel can also be a race condition: +
+ +
+c := make(chan struct{}) // or buffered channel
+
+// The race detector cannot derive the happens-before relation
+// for the following send and close operations. These two operations
+// are unsynchronized and happen concurrently.
+go func() { c <- struct{}{} }()
+close(c)
+
+
++According to the Go memory model, a send on a channel happens before +the corresponding receive from that channel completes. To synchronize +send and close operations, use a receive operation that guarantees +the send is done before the close: +
+ +
+c := make(chan struct{}) // or buffered channel
+
+go func() { c <- struct{}{} }()
+<-c
+close(c)
+
+
diff --git a/doc/go1.14.html b/doc/go1.14.html index 6e7c5dc9b6..eb35fa8cae 100644 --- a/doc/go1.14.html +++ b/doc/go1.14.html @@ -759,6 +759,19 @@ Do not send CLs removing the interior tags from such phrases. +
+ When parsing of a URL fails
+ (for example by Parse
+ or ParseRequestURI),
+ the resulting Error message
+ will now quote the unparsable URL.
+ This provides clearer structure and consistency with other parsing errors.
+
diff --git a/doc/go1.15.html b/doc/go1.15.html index 1eb159c318..b4319874c9 100644 --- a/doc/go1.15.html +++ b/doc/go1.15.html @@ -47,6 +47,14 @@ TODO TODO
+go test
+ Changing the -timeout flag now invalidates cached test results. A
+ cached result for a test run with a long timeout will no longer count as
+ passing when go test is re-invoked with a short one.
+
@@ -92,6 +100,18 @@ TODO TODO
+
+ If panic is invoked with a value whose type is derived from any
+ of: bool, complex64, complex128, float32, float64,
+ int, int8, int16, int32, int64, string,
+ uint, uint8, uint16, uint32, uint64, uintptr,
+ then the value will be printed, instead of just its address.
+
diff --git a/doc/go_mem.html b/doc/go_mem.html index d355bebaed..5f1eb68af3 100644 --- a/doc/go_mem.html +++ b/doc/go_mem.html @@ -273,9 +273,7 @@ func f() { a = "hello, world" <-c } - -
func main() {
go f()
c <- 0
diff --git a/misc/cgo/test/sigaltstack.go b/misc/cgo/test/sigaltstack.go
index 2c9b81ced7..7b3f4acbb7 100644
--- a/misc/cgo/test/sigaltstack.go
+++ b/misc/cgo/test/sigaltstack.go
@@ -14,15 +14,22 @@ package cgotest
#include
#include
+#ifdef _AIX
+// On AIX, SIGSTKSZ is too small to handle the Go signal handler.
+#define CSIGSTKSZ 0x4000
+#else
+#define CSIGSTKSZ SIGSTKSZ
+#endif
+
static stack_t oss;
-static char signalStack[SIGSTKSZ];
+static char signalStack[CSIGSTKSZ];
static void changeSignalStack(void) {
stack_t ss;
memset(&ss, 0, sizeof ss);
ss.ss_sp = signalStack;
ss.ss_flags = 0;
- ss.ss_size = SIGSTKSZ;
+ ss.ss_size = CSIGSTKSZ;
if (sigaltstack(&ss, &oss) < 0) {
perror("sigaltstack");
abort();
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index e872cc2050..e7931387aa 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -117,17 +117,17 @@ func LastIndex(s, sep []byte) int {
return -1
}
// Rabin-Karp search from the end of the string
- hashss, pow := hashStrRev(sep)
+ hashss, pow := bytealg.HashStrRevBytes(sep)
last := len(s) - n
var h uint32
for i := len(s) - 1; i >= last; i-- {
- h = h*primeRK + uint32(s[i])
+ h = h*bytealg.PrimeRK + uint32(s[i])
}
if h == hashss && Equal(s[last:], sep) {
return last
}
for i := last - 1; i >= 0; i-- {
- h *= primeRK
+ h *= bytealg.PrimeRK
h += uint32(s[i])
h -= pow * uint32(s[i+n])
if h == hashss && Equal(s[i:i+n], sep) {
@@ -1068,7 +1068,7 @@ func Index(s, sep []byte) int {
// we should cutover at even larger average skips,
// because Equal becomes that much more expensive.
// This code does not take that effect into account.
- j := indexRabinKarp(s[i:], sep)
+ j := bytealg.IndexRabinKarpBytes(s[i:], sep)
if j < 0 {
return -1
}
@@ -1077,63 +1077,3 @@ func Index(s, sep []byte) int {
}
return -1
}
-
-func indexRabinKarp(s, sep []byte) int {
- // Rabin-Karp search
- hashsep, pow := hashStr(sep)
- n := len(sep)
- var h uint32
- for i := 0; i < n; i++ {
- h = h*primeRK + uint32(s[i])
- }
- if h == hashsep && Equal(s[:n], sep) {
- return 0
- }
- for i := n; i < len(s); {
- h *= primeRK
- h += uint32(s[i])
- h -= pow * uint32(s[i-n])
- i++
- if h == hashsep && Equal(s[i-n:i], sep) {
- return i - n
- }
- }
- return -1
-}
-
-// primeRK is the prime base used in Rabin-Karp algorithm.
-const primeRK = 16777619
-
-// hashStr returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func hashStr(sep []byte) (uint32, uint32) {
- hash := uint32(0)
- for i := 0; i < len(sep); i++ {
- hash = hash*primeRK + uint32(sep[i])
- }
- var pow, sq uint32 = 1, primeRK
- for i := len(sep); i > 0; i >>= 1 {
- if i&1 != 0 {
- pow *= sq
- }
- sq *= sq
- }
- return hash, pow
-}
-
-// hashStrRev returns the hash of the reverse of sep and the
-// appropriate multiplicative factor for use in Rabin-Karp algorithm.
-func hashStrRev(sep []byte) (uint32, uint32) {
- hash := uint32(0)
- for i := len(sep) - 1; i >= 0; i-- {
- hash = hash*primeRK + uint32(sep[i])
- }
- var pow, sq uint32 = 1, primeRK
- for i := len(sep); i > 0; i >>= 1 {
- if i&1 != 0 {
- pow *= sq
- }
- sq *= sq
- }
- return hash, pow
-}
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index 2dbbb99f37..a208d4ed76 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -141,9 +141,10 @@ var indexTests = []BinOpTest{
{"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33},
{"foofyfoobarfoobar", "y", 4},
{"oooooooooooooooooooooo", "r", -1},
- // test fallback to Rabin-Karp.
{"oxoxoxoxoxoxoxoxoxoxoxoy", "oy", 22},
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
+ // test fallback to Rabin-Karp.
+ {"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
}
var lastIndexTests = []BinOpTest{
@@ -209,6 +210,27 @@ func runIndexTests(t *testing.T, f func(s, sep []byte) int, funcName string, tes
t.Errorf("%s(%q,%q) = %v; want %v", funcName, a, b, actual, test.i)
}
}
+ var allocTests = []struct {
+ a []byte
+ b []byte
+ i int
+ }{
+ // case for function Index.
+ {[]byte("000000000000000000000000000000000000000000000000000000000000000000000001"), []byte("0000000000000000000000000000000000000000000000000000000000000000001"), 5},
+ // case for function LastIndex.
+ {[]byte("000000000000000000000000000000000000000000000000000000000000000010000"), []byte("00000000000000000000000000000000000000000000000000000000000001"), 3},
+ }
+ allocs := testing.AllocsPerRun(100, func() {
+ if i := Index(allocTests[1].a, allocTests[1].b); i != allocTests[1].i {
+ t.Errorf("Index([]byte(%q), []byte(%q)) = %v; want %v", allocTests[1].a, allocTests[1].b, i, allocTests[1].i)
+ }
+ if i := LastIndex(allocTests[0].a, allocTests[0].b); i != allocTests[0].i {
+ t.Errorf("LastIndex([]byte(%q), []byte(%q)) = %v; want %v", allocTests[0].a, allocTests[0].b, i, allocTests[0].i)
+ }
+ })
+ if allocs != 0 {
+ t.Errorf("expected no allocations, got %f", allocs)
+ }
}
func runIndexAnyTests(t *testing.T, f func(s []byte, chars string) int, funcName string, testCases []BinOpTest) {
diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go
index f090d12bed..d9ba6670e8 100644
--- a/src/cmd/asm/internal/arch/arch.go
+++ b/src/cmd/asm/internal/arch/arch.go
@@ -484,6 +484,9 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
register[obj.Rconv(i)] = int16(i)
}
+ for i := mips.REG_W0; i <= mips.REG_W31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
register["HI"] = mips.REG_HI
register["LO"] = mips.REG_LO
// Pseudo-registers.
@@ -501,6 +504,7 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
"FCR": true,
"M": true,
"R": true,
+ "W": true,
}
instructions := make(map[string]obj.As)
diff --git a/src/cmd/asm/internal/arch/mips.go b/src/cmd/asm/internal/arch/mips.go
index 79fb7cf02e..5d71f40fbe 100644
--- a/src/cmd/asm/internal/arch/mips.go
+++ b/src/cmd/asm/internal/arch/mips.go
@@ -63,6 +63,10 @@ func mipsRegisterNumber(name string, n int16) (int16, bool) {
if 0 <= n && n <= 31 {
return mips.REG_R0 + n, true
}
+ case "W":
+ if 0 <= n && n <= 31 {
+ return mips.REG_W0 + n, true
+ }
}
return 0, false
}
diff --git a/src/cmd/asm/internal/asm/testdata/mips64.s b/src/cmd/asm/internal/asm/testdata/mips64.s
index 2a8c288d7b..21ab82f319 100644
--- a/src/cmd/asm/internal/asm/testdata/mips64.s
+++ b/src/cmd/asm/internal/asm/testdata/mips64.s
@@ -583,6 +583,39 @@ label4:
NEGV R1, R2 // 0001102f
RET
+// MSA VMOVI
+ VMOVB $511, W0 // 7b0ff807
+ VMOVH $24, W23 // 7b20c5c7
+ VMOVW $-24, W15 // 7b5f43c7
+ VMOVD $-511, W31 // 7b700fc7
+
+ VMOVB (R0), W8 // 78000220
+ VMOVB 511(R3), W0 // 79ff1820
+ VMOVB -512(R12), W21 // 7a006560
+ VMOVH (R24), W12 // 7800c321
+ VMOVH 110(R19), W8 // 78379a21
+ VMOVH -70(R12), W3 // 7bdd60e1
+ VMOVW (R3), W31 // 78001fe2
+ VMOVW 64(R20), W16 // 7810a422
+ VMOVW -104(R17), W24 // 7be68e22
+ VMOVD (R3), W2 // 780018a3
+ VMOVD 128(R23), W19 // 7810bce3
+ VMOVD -256(R31), W0 // 7be0f823
+
+ VMOVB W8, (R0) // 78000224
+ VMOVB W0, 511(R3) // 79ff1824
+ VMOVB W21, -512(R12) // 7a006564
+ VMOVH W12, (R24) // 7800c325
+ VMOVH W8, 110(R19) // 78379a25
+ VMOVH W3, -70(R12) // 7bdd60e5
+ VMOVW W31, (R3) // 78001fe6
+ VMOVW W16, 64(R20) // 7810a426
+ VMOVW W24, -104(R17) // 7be68e26
+ VMOVD W2, (R3) // 780018a7
+ VMOVD W19, 128(R23) // 7810bce7
+ VMOVD W0, -256(R31) // 7be0f827
+ RET
+
// END
//
// LEND comma // asm doesn't support the trailing comma.
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index c5c604003a..6ae363be22 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -483,6 +483,64 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
}
}
+// Signaling NaN values as constants.
+const (
+ snan32bits uint32 = 0x7f800001
+ snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+ // Make sure we generate a signaling NaN from a constant properly.
+ // See issue 36400.
+ f32 := math.Float32frombits(snan32bits)
+ g32 := math.Float32frombits(snan32bitsVar)
+ x32 := math.Float32bits(f32)
+ y32 := math.Float32bits(g32)
+ if x32 != y32 {
+ t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+ }
+
+ f64 := math.Float64frombits(snan64bits)
+ g64 := math.Float64frombits(snan64bitsVar)
+ x64 := math.Float64bits(f64)
+ y64 := math.Float64bits(g64)
+ if x64 != y64 {
+ t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+ }
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, we get a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399.
+ s32 := math.Float32frombits(snan32bitsVar)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bitsVar)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, it converts to a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399 and 36400.
+ s32 := math.Float32frombits(snan32bits)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bits)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
var sinkFloat float64
func BenchmarkMul2(b *testing.B) {
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 50b866ca65..85c857c214 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -279,7 +279,7 @@ type Arch struct {
var thearch Arch
var (
- staticbytes,
+ staticuint64s,
zerobase *Node
assertE2I,
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index a710f81dc5..34adeabae1 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -1274,6 +1274,16 @@ func (s *state) stmt(n *Node) {
s.assign(n.Left, r, deref, skip)
case OIF:
+ if Isconst(n.Left, CTBOOL) {
+ s.stmtList(n.Left.Ninit)
+ if n.Left.Bool() {
+ s.stmtList(n.Nbody)
+ } else {
+ s.stmtList(n.Rlist)
+ }
+ break
+ }
+
bEnd := s.f.NewBlock(ssa.BlockPlain)
var likely int8
if n.Likely() {
@@ -2203,7 +2213,7 @@ func (s *state) expr(n *Node) *ssa.Value {
conv = conv1
}
}
- if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat {
+ if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
@@ -3269,7 +3279,7 @@ func init() {
}
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
},
- sys.AMD64, sys.I386)
+ sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 9298d7b783..14af03f58c 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -565,7 +565,6 @@ opswitch:
n.Right = walkexpr(n.Right, &ll)
n.Right = addinit(n.Right, ll.Slice())
- n = walkinrange(n, init)
case OPRINT, OPRINTN:
n = walkprint(n, init)
@@ -838,10 +837,12 @@ opswitch:
break
}
- if staticbytes == nil {
- staticbytes = newname(Runtimepkg.Lookup("staticbytes"))
- staticbytes.SetClass(PEXTERN)
- staticbytes.Type = types.NewArray(types.Types[TUINT8], 256)
+ if staticuint64s == nil {
+ staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
+ staticuint64s.SetClass(PEXTERN)
+ // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+ // individual bytes.
+ staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
zerobase = newname(Runtimepkg.Lookup("zerobase"))
zerobase.SetClass(PEXTERN)
zerobase.Type = types.Types[TUINTPTR]
@@ -857,9 +858,16 @@ opswitch:
cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
- // n.Left is a bool/byte. Use staticbytes[n.Left].
+ // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
+ // and staticuint64s[n.Left * 8 + 7] on big-endian.
n.Left = cheapexpr(n.Left, init)
- value = nod(OINDEX, staticbytes, byteindex(n.Left))
+ // byteindex widens n.Left so that the multiplication doesn't overflow.
+ index := nod(OLSH, byteindex(n.Left), nodintconst(3))
+ index.SetBounded(true)
+ if thearch.LinkArch.ByteOrder == binary.BigEndian {
+ index = nod(OADD, index, nodintconst(7))
+ }
+ value = nod(OINDEX, staticuint64s, index)
value.SetBounded(true)
case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
// n.Left is a readonly global; use it directly.
@@ -2424,15 +2432,21 @@ func convnop(n *Node, t *types.Type) *Node {
return n
}
-// byteindex converts n, which is byte-sized, to a uint8.
-// We cannot use conv, because we allow converting bool to uint8 here,
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
func byteindex(n *Node) *Node {
- if types.Identical(n.Type, types.Types[TUINT8]) {
- return n
+ // We cannot convert from bool to int directly.
+ // While converting from int8 to int is possible, it would yield
+ // the wrong result for negative values.
+ // Reinterpreting the value as an unsigned byte solves both cases.
+ if !types.Identical(n.Type, types.Types[TUINT8]) {
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUINT8]
+ n.SetTypecheck(1)
}
n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINT8]
+ n.Type = types.Types[TINT]
n.SetTypecheck(1)
return n
}
@@ -3523,133 +3537,6 @@ func (n *Node) isIntOrdering() bool {
return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
}
-// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
-// n must be an OANDAND or OOROR node.
-// The result of walkinrange MUST be assigned back to n, e.g.
-// n.Left = walkinrange(n.Left)
-func walkinrange(n *Node, init *Nodes) *Node {
- // We are looking for something equivalent to a opl b OP b opr c, where:
- // * a, b, and c have integer type
- // * b is side-effect-free
- // * opl and opr are each < or ≤
- // * OP is &&
- l := n.Left
- r := n.Right
- if !l.isIntOrdering() || !r.isIntOrdering() {
- return n
- }
-
- // Find b, if it exists, and rename appropriately.
- // Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
- // Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
- a, opl, b := l.Left, l.Op, l.Right
- x, opr, c := r.Left, r.Op, r.Right
- for i := 0; ; i++ {
- if samesafeexpr(b, x) {
- break
- }
- if i == 3 {
- // Tried all permutations and couldn't find an appropriate b == x.
- return n
- }
- if i&1 == 0 {
- a, opl, b = b, brrev(opl), a
- } else {
- x, opr, c = c, brrev(opr), x
- }
- }
-
- // If n.Op is ||, apply de Morgan.
- // Negate the internal ops now; we'll negate the top level op at the end.
- // Henceforth assume &&.
- negateResult := n.Op == OOROR
- if negateResult {
- opl = brcom(opl)
- opr = brcom(opr)
- }
-
- cmpdir := func(o Op) int {
- switch o {
- case OLE, OLT:
- return -1
- case OGE, OGT:
- return +1
- }
- Fatalf("walkinrange cmpdir %v", o)
- return 0
- }
- if cmpdir(opl) != cmpdir(opr) {
- // Not a range check; something like b < a && b < c.
- return n
- }
-
- switch opl {
- case OGE, OGT:
- // We have something like a > b && b ≥ c.
- // Switch and reverse ops and rename constants,
- // to make it look like a ≤ b && b < c.
- a, c = c, a
- opl, opr = brrev(opr), brrev(opl)
- }
-
- // We must ensure that c-a is non-negative.
- // For now, require a and c to be constants.
- // In the future, we could also support a == 0 and c == len/cap(...).
- // Unfortunately, by this point, most len/cap expressions have been
- // stored into temporary variables.
- if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
- return n
- }
-
- // Ensure that Int64() does not overflow on a and c (it'll happen
- // for any const above 2**63; see issue #27143).
- if !a.CanInt64() || !c.CanInt64() {
- return n
- }
-
- if opl == OLT {
- // We have a < b && ...
- // We need a ≤ b && ... to safely use unsigned comparison tricks.
- // If a is not the maximum constant for b's type,
- // we can increment a and switch to ≤.
- if a.Int64() >= maxintval[b.Type.Etype].Int64() {
- return n
- }
- a = nodintconst(a.Int64() + 1)
- opl = OLE
- }
-
- bound := c.Int64() - a.Int64()
- if bound < 0 {
- // Bad news. Something like 5 <= x && x < 3.
- // Rare in practice, and we still need to generate side-effects,
- // so just leave it alone.
- return n
- }
-
- // We have a ≤ b && b < c (or a ≤ b && b ≤ c).
- // This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
- // which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
- // which is equivalent to uint(b-a) < uint(c-a).
- ut := b.Type.ToUnsigned()
- lhs := conv(nod(OSUB, b, a), ut)
- rhs := nodintconst(bound)
- if negateResult {
- // Negate top level.
- opr = brcom(opr)
- }
- cmp := nod(opr, lhs, rhs)
- cmp.Pos = n.Pos
- cmp = addinit(cmp, l.Ninit.Slice())
- cmp = addinit(cmp, r.Ninit.Slice())
- // Typecheck the AST rooted at cmp...
- cmp = typecheck(cmp, ctxExpr)
- // ...but then reset cmp's type to match n's type.
- cmp.Type = n.Type
- cmp = walkexpr(cmp, init)
- return cmp
-}
-
// return 1 if integer n must be in range [0, max), 0 otherwise
func bounded(n *Node, max int64) bool {
if n.Type == nil || !n.Type.IsInteger() {
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 591f3666e7..ce30c9ae37 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -1328,7 +1328,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
case ssa.OpPPC64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
- case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
+ case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpClobber:
// TODO: implement for clobberdead experiment. Nop is ok for now.
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 167c9a3411..3fece75b1b 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -314,6 +314,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
+ case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
@@ -464,7 +471,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := s.Prog(obj.AJMP)
+ p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index f1725bdda4..2de3ef4b35 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -498,6 +498,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XLDGR, ssa.OpS390XLGDR,
ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
+ ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR,
+ ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR,
ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index eadd5700ba..205fcfc707 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -232,6 +232,34 @@ func (b *Block) Reset(kind BlockKind) {
b.AuxInt = 0
}
+// resetWithControl resets b and adds control v.
+// It is equivalent to b.Reset(kind); b.AddControl(v),
+// except that it is one call instead of two and avoids a bounds check.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl(kind BlockKind, v *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// resetWithControl2 resets b and adds controls v and w.
+// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w),
+// except that it is one call instead of three and avoids two bounds checks.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ b.Controls[1] = w
+ v.Uses++
+ w.Uses++
+}
+
// AddEdgeTo adds an edge from block b to block c. Used during building of the
// SSA graph; do not use on an already-completed SSA graph.
func (b *Block) AddEdgeTo(c *Block) {
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index c7c3f8c15f..4f9fd8e22e 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -148,7 +148,7 @@ func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
// the number of useless instructions executed.
const maxfuseinsts = 2
- if len(simple.Values) > maxfuseinsts || !allTrivial(simple) {
+ if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
return false
}
@@ -305,10 +305,10 @@ func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
return false
}
yes, no := b.Succs[0].Block(), b.Succs[1].Block()
- if !isLeafPlain(yes) || len(yes.Values) > 1 || !allTrivial(yes) {
+ if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
return false
}
- if !isLeafPlain(no) || len(no.Values) > 1 || !allTrivial(no) {
+ if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
return false
}
if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
@@ -415,7 +415,15 @@ func shouldElimIfElse(no, yes, post *Block, arch string) bool {
}
}
-func allTrivial(b *Block) bool {
+// canSpeculativelyExecute reports whether every value in the block can
+// be evaluated without causing any observable side effects (memory
+// accesses, panics and so on) except for execution time changes. It
+// also ensures that the block does not contain any phis which we can't
+// speculatively execute.
+// Warning: this function cannot currently detect values that represent
+// instructions the execution of which need to be guarded with CPU
+// hardware feature checks. See issue #34950.
+func canSpeculativelyExecute(b *Block) bool {
// don't fuse memory ops, Phi ops, divides (can panic),
// or anything else with side-effects
for _, v := range b.Values {
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index a6746805f7..4c694a03ac 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -141,15 +141,23 @@ func checkFunc(f *Func) {
f.Fatalf("bad int32 AuxInt value for %v", v)
}
canHaveAuxInt = true
- case auxInt64, auxFloat64, auxARM64BitField:
+ case auxInt64, auxARM64BitField:
canHaveAuxInt = true
case auxInt128:
// AuxInt must be zero, so leave canHaveAuxInt set to false.
case auxFloat32:
canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
if !isExactFloat32(v.AuxFloat()) {
f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
}
+ case auxFloat64:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
case auxString, auxSym, auxTyp, auxArchSpecific:
canHaveAux = true
case auxSymOff, auxSymValAndOff, auxTypSize:
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 8551c0a54b..2de4e133bf 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -35,7 +35,8 @@ func Compile(f *Func) {
var rnd *rand.Rand
if checkEnabled {
- rnd = rand.New(rand.NewSource(int64(crc32.ChecksumIEEE(([]byte)(f.Name)))))
+ seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed)
+ rnd = rand.New(rand.NewSource(seed))
}
// hook to print function & phase if panic happens
@@ -199,7 +200,10 @@ func (p *pass) addDump(s string) {
}
// Run consistency checker between each phase
-var checkEnabled = false
+var (
+ checkEnabled = false
+ checkRandSeed = 0
+)
// Debug output
var IntrinsicsDebug int
@@ -253,7 +257,7 @@ where:
` + phasenames + `
- is one of:
- on, off, debug, mem, time, test, stats, dump
+ on, off, debug, mem, time, test, stats, dump, seed
- defaults to 1
@@ -271,6 +275,10 @@ Examples:
-d=ssa/check/on
enables checking after each phase
+ -d=ssa/check/seed=1234
+enables checking after each phase, using 1234 to seed the PRNG
+used for value order randomization
+
-d=ssa/all/time
enables time reporting for all phases
@@ -294,6 +302,12 @@ commas. For example:
debugPoset = checkEnabled
return ""
}
+ if phase == "check" && flag == "seed" {
+ checkEnabled = true
+ checkRandSeed = val
+ debugPoset = checkEnabled
+ return ""
+ }
alltime := false
allmem := false
@@ -414,7 +428,7 @@ var passes = [...]pass{
{name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
- {name: "fuse plain", fn: fusePlain},
+ {name: "early fuse", fn: fuseEarly},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
@@ -422,7 +436,7 @@ var passes = [...]pass{
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
{name: "check bce", fn: checkbce},
{name: "branchelim", fn: branchelim},
- {name: "fuse", fn: fuseAll},
+ {name: "late fuse", fn: fuseLate},
{name: "dse", fn: dse},
{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
{name: "insert resched checks", fn: insertLoopReschedChecks,
@@ -477,7 +491,7 @@ var passOrder = [...]constraint{
// allow deadcode to clean up after nilcheckelim
{"nilcheckelim", "generic deadcode"},
// nilcheckelim generates sequences of plain basic blocks
- {"nilcheckelim", "fuse"},
+ {"nilcheckelim", "late fuse"},
// nilcheckelim relies on opt to rewrite user nil checks
{"opt", "nilcheckelim"},
// tighten will be most effective when as many values have been removed as possible
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index c2d4051da8..f80ec0dc5d 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -8,18 +8,18 @@ import (
"cmd/internal/src"
)
-// fusePlain runs fuse(f, fuseTypePlain).
-func fusePlain(f *Func) { fuse(f, fuseTypePlain) }
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
-// fuseAll runs fuse(f, fuseTypeAll).
-func fuseAll(f *Func) { fuse(f, fuseTypeAll) }
+// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf).
+func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf) }
type fuseType uint8
const (
fuseTypePlain fuseType = 1 << iota
fuseTypeIf
- fuseTypeAll = fuseTypePlain | fuseTypeIf
+ fuseTypeIntInRange
)
// fuse simplifies control flow by joining basic blocks.
@@ -32,6 +32,9 @@ func fuse(f *Func, typ fuseType) {
if typ&fuseTypeIf != 0 {
changed = fuseBlockIf(b) || changed
}
+ if typ&fuseTypeIntInRange != 0 {
+ changed = fuseIntegerComparisons(b) || changed
+ }
if typ&fuseTypePlain != 0 {
changed = fuseBlockPlain(b) || changed
}
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
new file mode 100644
index 0000000000..d843fc3fda
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
+// which can be optimized to 'unsigned(x-1) < 4'.
+//
+// Look for branch structure like:
+//
+// p
+// |\
+// | b
+// |/ \
+// s0 s1
+//
+// In our example, p has control '1 <= x', b has control 'x < 5',
+// and s0 and s1 are the if and else results of the comparison.
+//
+// This will be optimized into:
+//
+// p
+// \
+// b
+// / \
+// s0 s1
+//
+// where b has the combined control value 'unsigned(x-1) < 4'.
+// Later passes will then fuse p and b.
+func fuseIntegerComparisons(b *Block) bool {
+ if len(b.Preds) != 1 {
+ return false
+ }
+ p := b.Preds[0].Block()
+ if b.Kind != BlockIf || p.Kind != BlockIf {
+ return false
+ }
+
+ // Don't merge control values if b is likely to be bypassed anyway.
+ if p.Likely == BranchLikely && p.Succs[0].Block() != b {
+ return false
+ }
+ if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
+ return false
+ }
+
+ // Check if the control values combine to make an integer inequality that
+ // can be further optimized later.
+ bc := b.Controls[0]
+ pc := p.Controls[0]
+ if !areMergeableInequalities(bc, pc) {
+ return false
+ }
+
+ // If the first (true) successors match then we have a disjunction (||).
+ // If the second (false) successors match then we have a conjunction (&&).
+ for i, op := range [2]Op{OpOrB, OpAndB} {
+ if p.Succs[i].Block() != b.Succs[i].Block() {
+ continue
+ }
+
+ // TODO(mundaym): should we also check the cost of executing b?
+ // Currently we might speculatively execute b even if b contains
+ // a lot of instructions. We could just check that len(b.Values)
+ // is lower than a fixed amount. Bear in mind however that the
+ // other optimization passes might yet reduce the cost of b
+ // significantly so we shouldn't be overly conservative.
+ if !canSpeculativelyExecute(b) {
+ return false
+ }
+
+ // Logically combine the control values for p and b.
+ v := b.NewValue0(bc.Pos, op, bc.Type)
+ v.AddArg(pc)
+ v.AddArg(bc)
+
+ // Set the combined control value as the control value for b.
+ b.SetControl(v)
+
+ // Modify p so that it jumps directly to b.
+ p.removeEdge(i)
+ p.Kind = BlockPlain
+ p.Likely = BranchUnknown
+ p.ResetControls()
+
+ return true
+ }
+
+ // TODO: could negate condition(s) to merge controls.
+ return false
+}
+
+// getConstIntArgIndex returns the index of the first argument that is a
+// constant integer or -1 if no such argument exists.
+func getConstIntArgIndex(v *Value) int {
+ for i, a := range v.Args {
+ switch a.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ return i
+ }
+ }
+ return -1
+}
+
+// isSignedInequality reports whether op represents the inequality < or ≤
+// in the signed domain.
+func isSignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+ return true
+ }
+ return false
+}
+
+// isUnsignedInequality reports whether op represents the inequality < or ≤
+// in the unsigned domain.
+func isUnsignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U:
+ return true
+ }
+ return false
+}
+
+func areMergeableInequalities(x, y *Value) bool {
+ // We need both inequalities to be either in the signed or unsigned domain.
+ // TODO(mundaym): it would also be good to merge when we have an Eq op that
+ // could be transformed into a Less/Leq. For example in the unsigned
+ // domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
+ inequalityChecks := [...]func(*Value) bool{
+ isSignedInequality,
+ isUnsignedInequality,
+ }
+ for _, f := range inequalityChecks {
+ if !f(x) || !f(y) {
+ continue
+ }
+
+ // Check that both inequalities are comparisons with constants.
+ xi := getConstIntArgIndex(x)
+ if xi < 0 {
+ return false
+ }
+ yi := getConstIntArgIndex(y)
+ if yi < 0 {
+ return false
+ }
+
+ // Check that the non-constant arguments to the inequalities
+ // are the same.
+ return x.Args[xi^1] == y.Args[yi^1]
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index 77d2aad5c1..5fe3da93ca 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -26,7 +26,7 @@ func TestFuseEliminatesOneBranch(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
- fuseAll(fun.f)
+ fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -56,7 +56,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
- fuseAll(fun.f)
+ fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -90,7 +90,7 @@ func TestFuseHandlesPhis(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
- fuseAll(fun.f)
+ fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -122,7 +122,7 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
))
CheckFunc(fun.f)
- fuseAll(fun.f)
+ fuseLate(fun.f)
for k, b := range fun.blocks {
if k[:1] == "z" && b.Kind != BlockInvalid {
@@ -153,7 +153,7 @@ func TestFuseSideEffects(t *testing.T) {
Goto("loop")))
CheckFunc(fun.f)
- fuseAll(fun.f)
+ fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind == BlockInvalid {
@@ -196,7 +196,7 @@ func BenchmarkFuse(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
fun := c.Fun("entry", blocks...)
- fuseAll(fun.f)
+ fuseLate(fun.f)
}
})
}
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 78916bebc3..64a6cbaf84 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -92,6 +92,8 @@
(Round32F ...) -> (Copy ...)
(Round64F ...) -> (Copy ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 4fd13a5056..c165fed485 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -154,6 +154,8 @@
(Round(32|64)F ...) -> (Copy ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
@@ -756,6 +758,7 @@
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
+(ORQ x (MOVLconst [c])) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
@@ -1305,6 +1308,15 @@
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
+// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
+// In theory this applies to any of the simplifications above,
+// but CMPQ is the only one I've actually seen occur.
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y -> (FlagEQ)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
+
// Other known comparisons.
(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
@@ -1478,6 +1490,12 @@
(BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1<<uint32(c))])
(BTCLconst [c] (MOVLconst [d])) -> (MOVLconst [d^(1<<uint32(c))])
+(ORQ (MOVQconst [c]) (MOVQconst [d])) -> (MOVQconst [c|d])
+
// generic simplifications
// TODO: more of this
(ADDQ x (NEGQ y)) -> (SUBQ x y)
@@ -1493,6 +1511,7 @@
(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)])
(SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)])
+(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)])
// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 77e7b477c6..839d701b8c 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -207,6 +207,8 @@
(Round(32|64)F ...) -> (Copy ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
// fused-multiply-add
(FMA x y z) -> (FMULAD z x y)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 4e0ab3288d..61994a15a1 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -244,6 +244,8 @@
(Cvt32Fto64F ...) -> (FCVTSD ...)
(Cvt64Fto32F ...) -> (FCVTDS ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round32F ...) -> (LoweredRound32F ...)
(Round64F ...) -> (LoweredRound64F ...)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 228d5ee454..9ac8e5f471 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -170,6 +170,8 @@
(Cvt32Fto64F ...) -> (MOVFD ...)
(Cvt64Fto32F ...) -> (MOVDF ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round(32|64)F ...) -> (Copy ...)
// comparisons
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 35c65023cd..be05dc71c0 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -11,6 +11,8 @@
(Mul(64|32|16|8) x y) -> (Select1 (MULVU x y))
(Mul(32|64)F ...) -> (MUL(F|D) ...)
(Mul64uhilo ...) -> (MULVU ...)
+(Select0 (Mul64uover x y)) -> (Select1 (MULVU x y))
+(Select1 (Mul64uover x y)) -> (SGTU (Select0 (MULVU x y)) (MOVVconst [0]))
(Hmul64 x y) -> (Select0 (MULV x y))
(Hmul64u x y) -> (Select0 (MULVU x y))
@@ -171,6 +173,8 @@
(Cvt32Fto64F ...) -> (MOVFD ...)
(Cvt64Fto32F ...) -> (MOVDF ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round(32|64)F ...) -> (Copy ...)
// comparisons
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index d4ef49e20b..c53ec0fde1 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -59,6 +59,8 @@
(Cvt32Fto64F ...) -> (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F ...) -> (FRSP ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
(Sqrt ...) -> (FSQRT ...)
@@ -78,7 +80,7 @@
// Constant folding
(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
-(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
@@ -116,47 +118,22 @@
(ROTLW x (MOVDconst [c])) -> (ROTLWconst x [c&31])
(ROTL x (MOVDconst [c])) -> (ROTLconst x [c&63])
-(Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
-(Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
-(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
-(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
-(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
-(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
-(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLWconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
-(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
-
-(Lsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
-(Rsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
-(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
-(Lsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
-(Rsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
-(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
-(Lsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
-(Rsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SLWconst x [c])
-(Rsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
-(Rsh8Ux32 x (Const64 [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
// large constant shifts
-(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
-(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
-(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
-(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
-(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
-(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
-(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
-(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
+(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
+(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
+(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
+(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
+(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
+(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
+(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
+(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
// large constant signed right shift, we leave the sign bit
-(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
-(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
-(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRADconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
// constant shifts
(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
@@ -297,11 +274,13 @@
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x
-// Potentially useful optimizing rewrites.
-// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
-// (ADDconstForCarry [k] c), K < 0 && (c >= 0 && k+c < 0) -> CarryClear
-// (MaskIfNotCarry CarrySet) -> 0
-// (MaskIfNotCarry CarrySet) -> -1
+(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c)) + d >= 0) -> (FlagCarryClear)
+(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c)) + d < 0 -> (FlagCarrySet)
+
+(MaskIfNotCarry (FlagCarrySet)) -> (MOVDconst [0])
+(MaskIfNotCarry (FlagCarryClear)) -> (MOVDconst [-1])
+
+(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) -> (S(RAD|RAW|RD|RW|LD|LW)const [c] x)
(Addr ...) -> (MOVDaddr ...)
(LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
@@ -662,6 +641,9 @@
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
+(ORN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|^d])
+(ANDN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&^d])
+(NOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [^(c|d)])
// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index 6660b921ef..d0a22c1f20 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -205,7 +205,7 @@ func init() {
{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
{name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry)
- {name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux
+ {name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint
{name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1)
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits
@@ -588,10 +588,11 @@ func init() {
// These ops are for temporary use by rewrite rules. They
// cannot appear in the generated assembly.
- {name: "FlagEQ"}, // equal
- {name: "FlagLT"}, // signed < or unsigned <
- {name: "FlagGT"}, // signed > or unsigned >
-
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT"}, // signed < or unsigned <
+ {name: "FlagGT"}, // signed > or unsigned >
+ {name: "FlagCarrySet"}, // carry flag set
+ {name: "FlagCarryClear"}, // carry flag clear
}
blocks := []blockData{
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 720724647e..9b88b56871 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -131,6 +131,8 @@
(Cvt32Fto64F ...) -> (FCVTDS ...)
(Cvt64Fto32F ...) -> (FCVTSD ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round32F ...) -> (Copy ...)
(Round64F ...) -> (Copy ...)
@@ -325,6 +327,14 @@
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
(MOVBUload [off1+off2] {sym} base mem)
@@ -349,6 +359,10 @@
(MOVWstore [off1+off2] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
(MOVDstore [off1+off2] {sym} base val mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
@@ -436,9 +450,6 @@
(MOVDconst [c]) && !is32Bit(c) && int32(c) < 0 -> (ADD (SLLI [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
(MOVDconst [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
-// Fold ADD+MOVDconst into ADDI where possible.
-(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
-
(Addr ...) -> (MOVaddr ...)
(LocalAddr {sym} base _) -> (MOVaddr {sym} base)
@@ -457,5 +468,34 @@
(ClosureCall ...) -> (CALLclosure ...)
(InterCall ...) -> (CALLinter ...)
+// Optimizations
+
+// Absorb SNEZ into branch.
+(BNE (SNEZ x) yes no) -> (BNE x yes no)
+
+// Store zero
+(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
+
+// Fold ADD+MOVDconst into ADDI where possible.
+(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
+
+// Convert subtraction of a const into ADDI with negative immediate, where possible.
+(SUB x (MOVBconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVHconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+
+// Subtraction of zero.
+(SUB x (MOVBconst [0])) -> x
+(SUB x (MOVHconst [0])) -> x
+(SUB x (MOVWconst [0])) -> x
+(SUB x (MOVDconst [0])) -> x
+
+// Subtraction of zero with sign extension.
+(SUBW x (MOVWconst [0])) -> (ADDIW [0] x)
+
// remove redundant *const ops
(ADDI [0] x) -> x
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
index 7829f9a07c..28a91d559f 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -106,12 +106,13 @@ func init() {
callerSave := gpMask | fpMask | regNamed["g"]
var (
- gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
- gp01 = regInfo{outputs: []regMask{gpMask}}
- gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
- gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
- gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
- gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+ gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
+ gpstore0 = regInfo{inputs: []regMask{gpspsbMask}}
+ gp01 = regInfo{outputs: []regMask{gpMask}}
+ gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+ gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+ gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+ gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
@@ -171,6 +172,12 @@ func init() {
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+ // Stores: store of zero in arg0+auxint+aux; arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
// Shift ops
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << aux1
{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> aux1, signed
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 30a0249759..5cff8df3a4 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -227,9 +227,23 @@
(Cvt64Fto32 ...) -> (CFDBRA ...)
(Cvt64Fto64 ...) -> (CGDBRA ...)
+// Lowering float <-> uint
+(Cvt32Uto32F ...) -> (CELFBR ...)
+(Cvt32Uto64F ...) -> (CDLFBR ...)
+(Cvt64Uto32F ...) -> (CELGBR ...)
+(Cvt64Uto64F ...) -> (CDLGBR ...)
+
+(Cvt32Fto32U ...) -> (CLFEBR ...)
+(Cvt32Fto64U ...) -> (CLGEBR ...)
+(Cvt64Fto32U ...) -> (CLFDBR ...)
+(Cvt64Fto64U ...) -> (CLGDBR ...)
+
+// Lowering float32 <-> float64
(Cvt32Fto64F ...) -> (LDEBR ...)
(Cvt64Fto32F ...) -> (LEDBR ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
// Lowering shifts
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index 6517957fd4..819046d30c 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -401,8 +401,17 @@ func init() {
{name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA"}, // convert int32 to float64
{name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA"}, // convert int64 to float32
{name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA"}, // convert int64 to float64
- {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
- {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
+ {name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR"}, // convert float32 to uint32
+ {name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR"}, // convert float64 to uint32
+ {name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR"}, // convert float32 to uint64
+ {name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR"}, // convert float64 to uint64
+ {name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR"}, // convert uint32 to float32
+ {name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR"}, // convert uint32 to float64
+ {name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR"}, // convert uint64 to float32
+ {name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR"}, // convert uint64 to float64
+
+ {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
+ {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
{name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux
{name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"}, // arg0 + arg1 + auxint + aux
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index 010adcb095..bf2b904baf 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -91,6 +91,8 @@
(Cvt32Fto64F ...) -> (F64PromoteF32 ...)
(Cvt64Fto32F ...) -> (F32DemoteF64 ...)
+(CvtBoolToUint8 ...) -> (Copy ...)
+
(Round32F ...) -> (Copy ...)
(Round64F ...) -> (Copy ...)
@@ -355,7 +357,7 @@
(I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
(I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
-(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
(I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
(I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
(I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
@@ -365,15 +367,16 @@
(I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) >> uint64(y))])
(I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)])
-(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
-(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
-(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
-(I64Or (I64Const [x]) y) -> (I64Or y (I64Const [x]))
-(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
-(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
-(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
-(I64Eq (I64Const [x]) y) -> (I64Eq y (I64Const [x]))
-(I64Ne (I64Const [x]) y) -> (I64Ne y (I64Const [x]))
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x]))
+(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x]))
+(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y (I64Const [x]))
+(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y (I64Const [x]))
(I64Eq x (I64Const [0])) -> (I64Eqz x)
(I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x))
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index f4d487176b..8ec22d86e7 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -56,6 +56,7 @@
(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(auxTo64F(c))])
(Round32F x:(Const32F)) -> x
(Round64F x:(Const64F)) -> x
+(CvtBoolToUint8 (ConstBool [c])) -> (Const8 [c])
(Trunc16to8 (ZeroExt8to16 x)) -> x
(Trunc32to8 (ZeroExt8to32 x)) -> x
@@ -118,8 +119,8 @@
(Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
(Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
-(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
-(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
+(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
+(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
(And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))])
(And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))])
@@ -144,8 +145,8 @@
(Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
(Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
(Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
-(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
-(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
+(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
(Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y)
(Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y)
@@ -253,6 +254,54 @@
(Neq16 (Const16 [c]) (Add16 (Const16 [d]) x)) -> (Neq16 (Const16 [int64(int16(c-d))]) x)
(Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) -> (Neq8 (Const8 [int64(int8(c-d))]) x)
+// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
+(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c -> ((Less|Leq)64U (Sub64 x (Const64 [c])) (Const64 [d-c]))
+(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c -> ((Less|Leq)32U (Sub32 x (Const32 [c])) (Const32 [d-c]))
+(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c -> ((Less|Leq)16U (Sub16 x (Const16 [c])) (Const16 [d-c]))
+(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c -> ((Less|Leq)8U (Sub8 x (Const8 [c])) (Const8 [d-c]))
+
+// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
+(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && int64(c+1) > int64(c) -> ((Less|Leq)64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1]))
+(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && int32(c+1) > int32(c) -> ((Less|Leq)32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1]))
+(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && int16(c+1) > int16(c) -> ((Less|Leq)16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1]))
+(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && int8(c+1) > int8(c) -> ((Less|Leq)8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1]))
+
+// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
+(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) -> ((Less|Leq)64U (Sub64 x (Const64 [c])) (Const64 [d-c]))
+(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) -> ((Less|Leq)32U (Sub32 x (Const32 [c])) (Const32 [int64(int32(d-c))]))
+(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) -> ((Less|Leq)16U (Sub16 x (Const16 [c])) (Const16 [int64(int16(d-c))]))
+(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) -> ((Less|Leq)8U (Sub8 x (Const8 [c])) (Const8 [int64(int8(d-c))]))
+
+// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
+(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) -> ((Less|Leq)64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1]))
+(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) -> ((Less|Leq)32U (Sub32 x (Const32 [int64(int32(c+1))])) (Const32 [int64(int32(d-c-1))]))
+(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) -> ((Less|Leq)16U (Sub16 x (Const16 [int64(int16(c+1))])) (Const16 [int64(int16(d-c-1))]))
+(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) -> ((Less|Leq)8U (Sub8 x (Const8 [int64(int8(c+1))])) (Const8 [int64(int8(d-c-1))]))
+
+// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d -> ((Less|Leq)64U (Const64 [c-d]) (Sub64 x (Const64 [d])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d -> ((Less|Leq)32U (Const32 [c-d]) (Sub32 x (Const32 [d])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d -> ((Less|Leq)16U (Const16 [c-d]) (Sub16 x (Const16 [d])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d -> ((Less|Leq)8U (Const8 [c-d]) (Sub8 x (Const8 [d])))
+
+// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && int64(d+1) > int64(d) -> ((Less|Leq)64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && int32(d+1) > int32(d) -> ((Less|Leq)32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && int16(d+1) > int16(d) -> ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && int8(d+1) > int8(d) -> ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1])))
+
+// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) -> ((Less|Leq)64U (Const64 [c-d]) (Sub64 x (Const64 [d])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) -> ((Less|Leq)32U (Const32 [int64(int32(c-d))]) (Sub32 x (Const32 [d])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) -> ((Less|Leq)16U (Const16 [int64(int16(c-d))]) (Sub16 x (Const16 [d])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) -> ((Less|Leq)8U (Const8 [int64( int8(c-d))]) (Sub8 x (Const8 [d])))
+
+// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) -> ((Less|Leq)64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) -> ((Less|Leq)32U (Const32 [int64(int32(c-d-1))]) (Sub32 x (Const32 [int64(int32(d+1))])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) -> ((Less|Leq)16U (Const16 [int64(int16(c-d-1))]) (Sub16 x (Const16 [int64(int16(d+1))])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) -> ((Less|Leq)8U (Const8 [int64( int8(c-d-1))]) (Sub8 x (Const8 [int64( int8(d+1))])))
+
// Canonicalize x-const to x+(-const)
(Sub64 x (Const64 [c])) && x.Op != OpConst64 -> (Add64 (Const64 [-c]) x)
(Sub32 x (Const32 [c])) && x.Op != OpConst32 -> (Add32 (Const32 [int64(int32(-c))]) x)
@@ -574,8 +623,8 @@
-> x
// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
-(Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
-(Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
+ (Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x])
+ (Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
(Load p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
(Load p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
@@ -1844,7 +1893,7 @@
(Div32F x (Const32F [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F [auxFrom32F(1/auxTo32F(c))]))
(Div64F x (Const64F [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F [auxFrom64F(1/auxTo64F(c))]))
-(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
// recognize runtime.newobject and don't Zero/Nilcheck it
(Zero (Load (OffPtr [c] (SP)) mem) mem)
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 9f17299610..b7e91a1f20 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -323,7 +323,12 @@ var genericOps = []opData{
{name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
// Note: ConstX are sign-extended even when the type of the value is unsigned.
// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
- {name: "Const64", aux: "Int64"}, // value is auxint
+ {name: "Const64", aux: "Int64"}, // value is auxint
+ // Note: for both Const32F and Const64F, we disallow encoding NaNs.
+ // Signaling NaNs are tricky because if you do anything with them, they become quiet.
+ // Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+	// See issues 36399 and 36400.
+ // Encodings of +inf, -inf, and -0 are fine.
{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
{name: "ConstInterface"}, // nil interface
@@ -418,6 +423,7 @@ var genericOps = []opData{
{name: "Cvt64Fto64", argLength: 1},
{name: "Cvt32Fto64F", argLength: 1},
{name: "Cvt64Fto32F", argLength: 1},
+ {name: "CvtBoolToUint8", argLength: 1},
// Force rounding to precision of type.
{name: "Round32F", argLength: 1},
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 2a10f2fa25..8e88d0b6a3 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -891,7 +891,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
}
blockName, _ := getBlockInfo(outop, arch)
- rr.add(stmtf("b.Reset(%s)", blockName))
+ var genControls [2]string
for i, control := range t[:outdata.controls] {
// Select a source position for any new control values.
// TODO: does it always make sense to use the source position
@@ -904,9 +904,19 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
}
// Generate a new control value (or copy an existing value).
- v := genResult0(rr, arch, control, false, false, newpos)
- rr.add(stmtf("b.AddControl(%s)", v))
+ genControls[i] = genResult0(rr, arch, control, false, false, newpos)
}
+ switch outdata.controls {
+ case 0:
+ rr.add(stmtf("b.Reset(%s)", blockName))
+ case 1:
+ rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0]))
+ case 2:
+ rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1]))
+ default:
+ log.Fatalf("too many controls: %d", outdata.controls)
+ }
+
if auxint != "" {
rr.add(stmtf("b.AuxInt = %s", auxint))
}
@@ -991,16 +1001,21 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
}
}
- // Access last argument first to minimize bounds checks.
- if n := len(args); n > 1 && !pregenTop {
- a := args[n-1]
- if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
- rr.add(declf(a, "%s.Args[%d]", v, n-1))
-
- // delete the last argument so it is not reprocessed
- args = args[:n-1]
- } else {
- rr.add(stmtf("_ = %s.Args[%d]", v, n-1))
+ if !pregenTop {
+ // Access last argument first to minimize bounds checks.
+ for n := len(args) - 1; n > 0; n-- {
+ a := args[n]
+ if a == "_" {
+ continue
+ }
+ if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
+ rr.add(declf(a, "%s.Args[%d]", v, n))
+ // delete the last argument so it is not reprocessed
+ args = args[:n]
+ } else {
+ rr.add(stmtf("_ = %s.Args[%d]", v, n))
+ }
+ break
}
}
if commutative && !pregenTop {
@@ -1093,9 +1108,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
// It in not safe in general to move a variable between blocks
// (and particularly not a phi node).
// Introduce a copy.
- rr.add(stmtf("v.reset(OpCopy)"))
- rr.add(stmtf("v.Type = %s.Type", result))
- rr.add(stmtf("v.AddArg(%s)", result))
+ rr.add(stmtf("v.copyOf(%s)", result))
}
return result
}
@@ -1123,8 +1136,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
rr.add(declf(v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ))
if move && top {
// Rewrite original into a copy
- rr.add(stmtf("v.reset(OpCopy)"))
- rr.add(stmtf("v.AddArg(%s)", v))
+ rr.add(stmtf("v.copyOf(%s)", v))
}
}
@@ -1134,11 +1146,21 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
if aux != "" {
rr.add(stmtf("%s.Aux = %s", v, aux))
}
- for _, arg := range args {
+ all := new(strings.Builder)
+ for i, arg := range args {
x := genResult0(rr, arch, arg, false, move, pos)
- rr.add(stmtf("%s.AddArg(%s)", v, x))
+ if i > 0 {
+ all.WriteString(", ")
+ }
+ all.WriteString(x)
+ }
+ switch len(args) {
+ case 0:
+ case 1:
+ rr.add(stmtf("%s.AddArg(%s)", v, all.String()))
+ default:
+ rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String()))
}
-
return v
}
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 1e76a673ef..c384817d0c 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -93,7 +93,7 @@ td > h2 {
td.collapsed {
font-size: 12px;
width: 12px;
- border: 0px;
+ border: 1px solid white;
padding: 0;
cursor: pointer;
background: #fafafa;
@@ -247,18 +247,61 @@ svg {
outline: 1px solid #eee;
}
-.highlight-aquamarine { background-color: aquamarine; }
-.highlight-coral { background-color: coral; }
-.highlight-lightpink { background-color: lightpink; }
-.highlight-lightsteelblue { background-color: lightsteelblue; }
-.highlight-palegreen { background-color: palegreen; }
-.highlight-skyblue { background-color: skyblue; }
-.highlight-lightgray { background-color: lightgray; }
-.highlight-yellow { background-color: yellow; }
-.highlight-lime { background-color: lime; }
-.highlight-khaki { background-color: khaki; }
-.highlight-aqua { background-color: aqua; }
-.highlight-salmon { background-color: salmon; }
+body.darkmode {
+ background-color: rgb(21, 21, 21);
+ color: rgb(230, 255, 255);
+ opacity: 100%;
+}
+
+td.darkmode {
+ background-color: rgb(21, 21, 21);
+ border: 1px solid gray;
+}
+
+body.darkmode table, th {
+ border: 1px solid gray;
+}
+
+.highlight-aquamarine { background-color: aquamarine; color: black; }
+.highlight-coral { background-color: coral; color: black; }
+.highlight-lightpink { background-color: lightpink; color: black; }
+.highlight-lightsteelblue { background-color: lightsteelblue; color: black; }
+.highlight-palegreen { background-color: palegreen; color: black; }
+.highlight-skyblue { background-color: skyblue; color: black; }
+.highlight-lightgray { background-color: lightgray; color: black; }
+.highlight-yellow { background-color: yellow; color: black; }
+.highlight-lime { background-color: lime; color: black; }
+.highlight-khaki { background-color: khaki; color: black; }
+.highlight-aqua { background-color: aqua; color: black; }
+.highlight-salmon { background-color: salmon; color: black; }
+
+/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */
+.dead-value span.highlight-aquamarine,
+.dead-block.highlight-aquamarine,
+.dead-value span.highlight-coral,
+.dead-block.highlight-coral,
+.dead-value span.highlight-lightpink,
+.dead-block.highlight-lightpink,
+.dead-value span.highlight-lightsteelblue,
+.dead-block.highlight-lightsteelblue,
+.dead-value span.highlight-palegreen,
+.dead-block.highlight-palegreen,
+.dead-value span.highlight-skyblue,
+.dead-block.highlight-skyblue,
+.dead-value span.highlight-lightgray,
+.dead-block.highlight-lightgray,
+.dead-value span.highlight-yellow,
+.dead-block.highlight-yellow,
+.dead-value span.highlight-lime,
+.dead-block.highlight-lime,
+.dead-value span.highlight-khaki,
+.dead-block.highlight-khaki,
+.dead-value span.highlight-aqua,
+.dead-block.highlight-aqua,
+.dead-value span.highlight-salmon,
+.dead-block.highlight-salmon {
+ color: gray;
+}
.outline-blue { outline: blue solid 2px; }
.outline-red { outline: red solid 2px; }
@@ -284,6 +327,10 @@ ellipse.outline-teal { stroke-width: 2px; stroke: teal; }
ellipse.outline-maroon { stroke-width: 2px; stroke: maroon; }
ellipse.outline-black { stroke-width: 2px; stroke: black; }
+/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */
+body.darkmode .outline-black { outline: gray solid 2px; }
+body.darkmode ellipse.outline-black { outline: gray solid 2px; }
+
+}
+
+function toggleDarkMode() {
+ document.body.classList.toggle('darkmode');
+
+ const collapsedEls = document.getElementsByClassName('collapsed');
+ const len = collapsedEls.length;
+
+ for (let i = 0; i < len; i++) {
+ collapsedEls[i].classList.toggle('darkmode');
+ }
+}
+
+
`)
w.WriteString("")
@@ -616,6 +681,8 @@ Edge with a dot means that this edge follows the order in which blocks were laid
+
+
`)
w.WriteString("")
w.WriteString("")
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index f728e8ee25..16d94614d8 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -374,7 +374,7 @@ func TestNilcheckUser(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fusePlain(fun.f)
+ fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
index 3d77fe5bb4..f4e62b88c4 100644
--- a/src/cmd/compile/internal/ssa/numberlines.go
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -66,12 +66,9 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int {
return i
}
-// notStmtBoundary indicates which value opcodes can never be a statement
-// boundary because they don't correspond to a user's understanding of a
-// statement boundary. Called from *Value.reset(), and *Func.newValue(),
-// located here to keep all the statement boundary heuristics in one place.
-// Note: *Value.reset() filters out OpCopy because of how that is used in
-// rewrite.
+// notStmtBoundary reports whether a value with opcode op can never be a statement
+// boundary. Such values don't correspond to a user's understanding of a
+// statement boundary.
func notStmtBoundary(op Op) bool {
switch op {
case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg:
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 1111316d9b..9da7376a8a 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -1881,6 +1881,8 @@ const (
OpPPC64FlagEQ
OpPPC64FlagLT
OpPPC64FlagGT
+ OpPPC64FlagCarrySet
+ OpPPC64FlagCarryClear
OpRISCV64ADD
OpRISCV64ADDI
@@ -1915,6 +1917,10 @@ const (
OpRISCV64MOVHstore
OpRISCV64MOVWstore
OpRISCV64MOVDstore
+ OpRISCV64MOVBstorezero
+ OpRISCV64MOVHstorezero
+ OpRISCV64MOVWstorezero
+ OpRISCV64MOVDstorezero
OpRISCV64SLL
OpRISCV64SRA
OpRISCV64SRL
@@ -2113,6 +2119,14 @@ const (
OpS390XCDFBRA
OpS390XCEGBRA
OpS390XCDGBRA
+ OpS390XCLFEBR
+ OpS390XCLFDBR
+ OpS390XCLGEBR
+ OpS390XCLGDBR
+ OpS390XCELFBR
+ OpS390XCDLFBR
+ OpS390XCELGBR
+ OpS390XCDLGBR
OpS390XLEDBR
OpS390XLDEBR
OpS390XMOVDaddr
@@ -2584,6 +2598,7 @@ const (
OpCvt64Fto64
OpCvt32Fto64F
OpCvt64Fto32F
+ OpCvtBoolToUint8
OpRound32F
OpRound64F
OpIsNonNil
@@ -24986,6 +25001,16 @@ var opcodeTable = [...]opInfo{
argLen: 0,
reg: regInfo{},
},
+ {
+ name: "FlagCarrySet",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagCarryClear",
+ argLen: 0,
+ reg: regInfo{},
+ },
{
name: "ADD",
@@ -25462,6 +25487,58 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
{
name: "SLL",
argLen: 2,
@@ -28313,6 +28390,110 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "CLFEBR",
+ argLen: 1,
+ asm: s390x.ACLFEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLFDBR",
+ argLen: 1,
+ asm: s390x.ACLFDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGEBR",
+ argLen: 1,
+ asm: s390x.ACLGEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGDBR",
+ argLen: 1,
+ asm: s390x.ACLGDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CELFBR",
+ argLen: 1,
+ asm: s390x.ACELFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLFBR",
+ argLen: 1,
+ asm: s390x.ACDLFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CELGBR",
+ argLen: 1,
+ asm: s390x.ACELGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLGBR",
+ argLen: 1,
+ asm: s390x.ACDLGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
{
name: "LEDBR",
argLen: 1,
@@ -32561,6 +32742,11 @@ var opcodeTable = [...]opInfo{
argLen: 1,
generic: true,
},
+ {
+ name: "CvtBoolToUint8",
+ argLen: 1,
+ generic: true,
+ },
{
name: "Round32F",
argLen: 1,
diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go
index cc3319e188..8643fa584c 100644
--- a/src/cmd/compile/internal/ssa/phiopt.go
+++ b/src/cmd/compile/internal/ssa/phiopt.go
@@ -148,6 +148,13 @@ func phioptint(v *Value, b0 *Block, reverse int) {
negate = !negate
}
+ a := b0.Controls[0]
+ if negate {
+ a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
+ }
+ v.AddArg(a)
+
+ cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, a.Type, a)
switch v.Type.Size() {
case 1:
v.reset(OpCopy)
@@ -160,12 +167,7 @@ func phioptint(v *Value, b0 *Block, reverse int) {
default:
v.Fatalf("bad int size %d", v.Type.Size())
}
-
- a := b0.Controls[0]
- if negate {
- a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
- }
- v.AddArg(a)
+ v.AddArg(cvt)
f := b0.Func
if f.pass.debug > 0 {
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index fcbb76cf34..238e243096 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool {
// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
return int64(math.Float64bits(f))
}
// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
func auxFrom32F(f float32) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
return int64(math.Float64bits(extend32Fto64F(f)))
}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index cf9a7362a2..8b2da94c13 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -408,6 +408,9 @@ func rewriteValue386(v *Value) bool {
case OpCvt64Fto32F:
v.Op = Op386CVTSD2SS
return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
case OpDiv16:
v.Op = Op386DIVW
return true
@@ -788,8 +791,7 @@ func rewriteValue386_Op386ADCL(v *Value) bool {
f := v_2
v.reset(Op386ADCLconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(f)
+ v.AddArg2(x, f)
return true
}
break
@@ -899,8 +901,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386LEAL8)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -915,8 +916,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386LEAL4)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -931,8 +931,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386LEAL2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -950,8 +949,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
continue
}
v.reset(Op386LEAL2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -973,8 +971,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
}
y := v_1_1
v.reset(Op386LEAL2)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
}
@@ -992,8 +989,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
y := v_1
v.reset(Op386LEAL1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1016,8 +1012,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
v.reset(Op386LEAL1)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1042,9 +1037,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
v.reset(Op386ADDLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -1070,10 +1063,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
v.reset(Op386ADDLloadidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(x, ptr, idx, mem)
return true
}
break
@@ -1088,8 +1078,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386SUBL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1130,8 +1119,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool {
x := v_0.Args[0]
v.reset(Op386LEAL1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL [d] {s} x))
@@ -1172,8 +1160,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool {
v.reset(Op386LEAL1)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
@@ -1194,8 +1181,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool {
v.reset(Op386LEAL2)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
@@ -1216,8 +1202,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
@@ -1238,8 +1223,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] x)
@@ -1251,9 +1235,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ADDLconst [c] (MOVLconst [d]))
@@ -1307,8 +1289,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
v.reset(Op386ADDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -1330,8 +1311,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
v.reset(Op386ADDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -1361,9 +1341,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool {
v.reset(Op386ADDLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (ADDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
@@ -1385,9 +1363,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool {
v.reset(Op386ADDLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (ADDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
@@ -1410,9 +1386,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool {
v.reset(Op386ADDLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
return false
@@ -1442,9 +1416,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool {
v.reset(Op386ADDLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -1467,9 +1439,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool {
v.reset(Op386ADDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -1493,10 +1463,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool {
v.reset(Op386ADDLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, ptr, idx, mem)
return true
}
return false
@@ -1528,10 +1495,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool {
v.reset(Op386ADDLloadidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (ADDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
@@ -1554,10 +1518,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool {
v.reset(Op386ADDLloadidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (ADDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
@@ -1581,10 +1542,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool {
v.reset(Op386ADDLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
return false
@@ -1614,9 +1572,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool {
v.reset(Op386ADDLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -1639,9 +1595,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool {
v.reset(Op386ADDLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -1673,10 +1627,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool {
v.reset(Op386ADDLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ADDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
@@ -1699,10 +1650,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool {
v.reset(Op386ADDLmodifyidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ADDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
@@ -1726,10 +1674,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool {
v.reset(Op386ADDLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ADDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
@@ -1751,9 +1696,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool {
v.reset(Op386ADDLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -1783,9 +1726,7 @@ func rewriteValue386_Op386ADDSD(v *Value) bool {
v.reset(Op386ADDSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -1817,9 +1758,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool {
v.reset(Op386ADDSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -1842,9 +1781,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool {
v.reset(Op386ADDSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -1874,9 +1811,7 @@ func rewriteValue386_Op386ADDSS(v *Value) bool {
v.reset(Op386ADDSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -1908,9 +1843,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool {
v.reset(Op386ADDSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -1933,9 +1866,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool {
v.reset(Op386ADDSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -1979,9 +1910,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool {
v.reset(Op386ANDLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -2007,10 +1936,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool {
v.reset(Op386ANDLloadidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(x, ptr, idx, mem)
return true
}
break
@@ -2022,9 +1948,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -2066,9 +1990,7 @@ func rewriteValue386_Op386ANDLconst(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ANDLconst [c] (MOVLconst [d]))
@@ -2108,8 +2030,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
v.reset(Op386ANDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -2131,8 +2052,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
v.reset(Op386ANDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -2162,9 +2082,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool {
v.reset(Op386ANDLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (ANDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
@@ -2186,9 +2104,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool {
v.reset(Op386ANDLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (ANDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
@@ -2211,9 +2127,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool {
v.reset(Op386ANDLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
return false
@@ -2243,9 +2157,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool {
v.reset(Op386ANDLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -2268,9 +2180,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool {
v.reset(Op386ANDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ANDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -2294,10 +2204,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool {
v.reset(Op386ANDLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, ptr, idx, mem)
return true
}
return false
@@ -2329,10 +2236,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool {
v.reset(Op386ANDLloadidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (ANDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
@@ -2355,10 +2259,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool {
v.reset(Op386ANDLloadidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (ANDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
@@ -2382,10 +2283,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool {
v.reset(Op386ANDLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
return false
@@ -2415,9 +2313,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool {
v.reset(Op386ANDLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -2440,9 +2336,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool {
v.reset(Op386ANDLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -2474,10 +2368,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool {
v.reset(Op386ANDLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ANDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
@@ -2500,10 +2391,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool {
v.reset(Op386ANDLmodifyidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ANDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
@@ -2527,10 +2415,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool {
v.reset(Op386ANDLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ANDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
@@ -2552,9 +2437,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool {
v.reset(Op386ANDLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -2602,8 +2485,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool {
}
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -2626,9 +2508,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool {
v.reset(Op386CMPBload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
@@ -2651,9 +2531,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool {
v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -2769,8 +2647,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool {
break
}
v.reset(Op386TESTB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPBconst l:(ANDLconst [c] x) [0])
@@ -2802,8 +2679,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool {
}
x := v_0
v.reset(Op386TESTB)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
@@ -2824,12 +2700,10 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -2856,8 +2730,7 @@ func rewriteValue386_Op386CMPBload(v *Value) bool {
v.reset(Op386CMPBconstload)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -2905,8 +2778,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool {
}
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -2929,9 +2801,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool {
v.reset(Op386CMPLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
@@ -2954,9 +2824,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool {
v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -3087,8 +2955,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool {
break
}
v.reset(Op386TESTL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPLconst l:(ANDLconst [c] x) [0])
@@ -3120,8 +2987,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool {
}
x := v_0
v.reset(Op386TESTL)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
@@ -3142,12 +3008,10 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -3174,8 +3038,7 @@ func rewriteValue386_Op386CMPLload(v *Value) bool {
v.reset(Op386CMPLconstload)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -3223,8 +3086,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool {
}
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -3247,9 +3109,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool {
v.reset(Op386CMPWload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
@@ -3272,9 +3132,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool {
v0 := b.NewValue0(l.Pos, Op386CMPWload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -3390,8 +3248,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool {
break
}
v.reset(Op386TESTW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPWconst l:(ANDLconst [c] x) [0])
@@ -3423,8 +3280,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool {
}
x := v_0
v.reset(Op386TESTW)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
@@ -3445,12 +3301,10 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -3477,8 +3331,7 @@ func rewriteValue386_Op386CMPWload(v *Value) bool {
v.reset(Op386CMPWconstload)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -3507,9 +3360,7 @@ func rewriteValue386_Op386DIVSD(v *Value) bool {
v.reset(Op386DIVSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -3539,9 +3390,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool {
v.reset(Op386DIVSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -3564,9 +3413,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool {
v.reset(Op386DIVSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -3595,9 +3442,7 @@ func rewriteValue386_Op386DIVSS(v *Value) bool {
v.reset(Op386DIVSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -3627,9 +3472,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool {
v.reset(Op386DIVSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -3652,9 +3495,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool {
v.reset(Op386DIVSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -3702,8 +3543,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
v.reset(Op386LEAL1)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3748,8 +3588,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
v.reset(Op386LEAL1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
@@ -3771,8 +3610,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
v.reset(Op386LEAL2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
@@ -3794,8 +3632,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
@@ -3817,8 +3654,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3845,8 +3681,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
v.reset(Op386LEAL1)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3865,8 +3700,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
v.reset(Op386LEAL2)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3885,8 +3719,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3905,8 +3738,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3931,8 +3763,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
v.reset(Op386LEAL1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3960,8 +3791,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
v.reset(Op386LEAL2)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
@@ -3982,8 +3812,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
v.reset(Op386LEAL2)
v.AuxInt = c + 2*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
@@ -3999,8 +3828,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
@@ -4016,8 +3844,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
@@ -4039,8 +3866,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
v.reset(Op386LEAL2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -4066,8 +3892,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
@@ -4088,8 +3913,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = c + 4*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
@@ -4105,8 +3929,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
@@ -4128,8 +3951,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
v.reset(Op386LEAL4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -4155,8 +3977,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
@@ -4177,8 +3998,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = c + 8*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
@@ -4200,8 +4020,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool {
v.reset(Op386LEAL8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -4226,12 +4045,10 @@ func rewriteValue386_Op386MOVBLSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBLSX (ANDLconst [c] x))
@@ -4270,9 +4087,8 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -4299,8 +4115,7 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
v.reset(Op386MOVBLSXload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -4325,12 +4140,10 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
@@ -4351,13 +4164,10 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, Op386MOVBloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBLZX (ANDLconst [c] x))
@@ -4392,9 +4202,8 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -4420,8 +4229,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
v.reset(Op386MOVBload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -4443,8 +4251,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
v.reset(Op386MOVBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
@@ -4467,9 +4274,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
v.reset(Op386MOVBloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBload [off] {sym} (ADDL ptr idx) mem)
@@ -4494,9 +4299,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
v.reset(Op386MOVBloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -4536,9 +4339,7 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool {
v.reset(Op386MOVBloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -4559,9 +4360,7 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool {
v.reset(Op386MOVBloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -4588,9 +4387,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
@@ -4607,9 +4404,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
@@ -4631,9 +4426,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
@@ -4654,8 +4447,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstoreconst)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -4678,9 +4470,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
@@ -4704,10 +4494,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem)
@@ -4733,10 +4520,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVBstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -4763,9 +4547,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -4790,9 +4572,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem))
@@ -4818,9 +4598,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem))
@@ -4846,9 +4624,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
@@ -4878,9 +4654,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
return false
@@ -4908,8 +4682,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.reset(Op386MOVBstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -4931,8 +4704,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.reset(Op386MOVBstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
@@ -4955,9 +4727,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.reset(Op386MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem)
@@ -4974,9 +4744,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.reset(Op386MOVBstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
@@ -5001,8 +4769,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
@@ -5027,8 +4794,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -5052,9 +4818,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool {
v.reset(Op386MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
@@ -5072,9 +4836,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool {
v.reset(Op386MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
@@ -5100,9 +4862,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool {
v.reset(Op386MOVWstoreconstidx1)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(i)
- v.AddArg(mem)
+ v.AddArg3(p, i, mem)
return true
}
return false
@@ -5129,10 +4889,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVBstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -5154,10 +4911,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVBstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -5189,10 +4943,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -5225,10 +4976,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -5262,10 +5010,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -5299,10 +5044,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -5340,10 +5082,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -5368,15 +5107,12 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
@@ -5397,8 +5133,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
v.reset(Op386MOVLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -5420,8 +5155,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
v.reset(Op386MOVLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
@@ -5444,9 +5178,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
v.reset(Op386MOVLloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -5469,9 +5201,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
v.reset(Op386MOVLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLload [off] {sym} (ADDL ptr idx) mem)
@@ -5496,9 +5226,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
v.reset(Op386MOVLloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -5537,9 +5265,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool {
v.reset(Op386MOVLloadidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -5560,9 +5286,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool {
v.reset(Op386MOVLloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -5583,9 +5307,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool {
v.reset(Op386MOVLloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -5611,9 +5333,7 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool {
v.reset(Op386MOVLloadidx4)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
@@ -5631,9 +5351,7 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool {
v.reset(Op386MOVLloadidx4)
v.AuxInt = int64(int32(c + 4*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -5663,9 +5381,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
@@ -5686,8 +5402,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -5710,9 +5425,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
@@ -5736,10 +5449,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
@@ -5763,10 +5473,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386MOVLstoreidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem)
@@ -5792,10 +5499,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -5819,9 +5523,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ADDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
@@ -5843,9 +5545,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ANDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
@@ -5867,9 +5567,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
@@ -5891,9 +5589,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386XORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
@@ -5926,9 +5622,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ADDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -5956,9 +5650,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386SUBLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
@@ -5991,9 +5683,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ANDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -6028,9 +5718,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -6065,9 +5753,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386XORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -6095,8 +5781,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ADDLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
@@ -6122,8 +5807,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ANDLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
@@ -6149,8 +5833,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386ORLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
@@ -6176,8 +5859,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.reset(Op386XORLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -6205,8 +5887,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -6228,8 +5909,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
@@ -6252,9 +5932,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem)
@@ -6277,9 +5955,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem)
@@ -6296,9 +5972,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -6321,9 +5995,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool {
v.reset(Op386MOVLstoreconstidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
@@ -6341,9 +6013,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool {
v.reset(Op386MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
@@ -6361,9 +6031,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool {
v.reset(Op386MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -6387,9 +6055,7 @@ func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool {
v.reset(Op386MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem)
@@ -6407,9 +6073,7 @@ func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool {
v.reset(Op386MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(4 * c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -6435,10 +6099,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool {
v.reset(Op386MOVLstoreidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -6460,10 +6121,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -6485,10 +6143,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -6516,10 +6171,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386MOVLstoreidx4)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
@@ -6538,10 +6190,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386MOVLstoreidx4)
v.AuxInt = int64(int32(c + 4*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLloadidx4 x [off] {sym} ptr idx mem) mem)
@@ -6564,10 +6213,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ADDLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLloadidx4 x [off] {sym} ptr idx mem) mem)
@@ -6590,10 +6236,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ANDLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLloadidx4 x [off] {sym} ptr idx mem) mem)
@@ -6616,10 +6259,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ORLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLloadidx4 x [off] {sym} ptr idx mem) mem)
@@ -6642,10 +6282,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386XORLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
@@ -6679,10 +6316,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ADDLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
break
@@ -6711,10 +6345,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386SUBLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
@@ -6748,10 +6379,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ANDLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
break
@@ -6787,10 +6415,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ORLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
break
@@ -6826,10 +6451,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386XORLmodifyidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, x, mem)
return true
}
break
@@ -6858,9 +6480,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ADDLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
@@ -6887,9 +6507,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ANDLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
@@ -6916,9 +6534,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386ORLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
@@ -6945,9 +6561,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
v.reset(Op386XORLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -6995,8 +6609,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
v.reset(Op386MOVSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -7018,8 +6631,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
v.reset(Op386MOVSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
@@ -7042,9 +6654,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
v.reset(Op386MOVSDloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem)
@@ -7067,9 +6677,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
v.reset(Op386MOVSDloadidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDload [off] {sym} (ADDL ptr idx) mem)
@@ -7094,9 +6702,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
v.reset(Op386MOVSDloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -7122,9 +6728,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool {
v.reset(Op386MOVSDloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
@@ -7142,9 +6746,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool {
v.reset(Op386MOVSDloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -7168,9 +6770,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool {
v.reset(Op386MOVSDloadidx8)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
@@ -7188,9 +6788,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool {
v.reset(Op386MOVSDloadidx8)
v.AuxInt = int64(int32(c + 8*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -7220,9 +6818,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
v.reset(Op386MOVSDstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -7245,9 +6841,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
v.reset(Op386MOVSDstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
@@ -7271,10 +6865,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
v.reset(Op386MOVSDstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem)
@@ -7298,10 +6889,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
v.reset(Op386MOVSDstoreidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem)
@@ -7327,10 +6915,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
v.reset(Op386MOVSDstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -7358,10 +6943,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool {
v.reset(Op386MOVSDstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
@@ -7380,10 +6962,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool {
v.reset(Op386MOVSDstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -7409,10 +6988,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool {
v.reset(Op386MOVSDstoreidx8)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
@@ -7431,10 +7007,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool {
v.reset(Op386MOVSDstoreidx8)
v.AuxInt = int64(int32(c + 8*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -7482,8 +7055,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
v.reset(Op386MOVSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -7505,8 +7077,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
v.reset(Op386MOVSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
@@ -7529,9 +7100,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
v.reset(Op386MOVSSloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -7554,9 +7123,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
v.reset(Op386MOVSSloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSload [off] {sym} (ADDL ptr idx) mem)
@@ -7581,9 +7148,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
v.reset(Op386MOVSSloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -7609,9 +7174,7 @@ func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool {
v.reset(Op386MOVSSloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
@@ -7629,9 +7192,7 @@ func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool {
v.reset(Op386MOVSSloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -7655,9 +7216,7 @@ func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool {
v.reset(Op386MOVSSloadidx4)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
@@ -7675,9 +7234,7 @@ func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool {
v.reset(Op386MOVSSloadidx4)
v.AuxInt = int64(int32(c + 4*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -7707,9 +7264,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
v.reset(Op386MOVSSstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -7732,9 +7287,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
v.reset(Op386MOVSSstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
@@ -7758,10 +7311,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
v.reset(Op386MOVSSstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
@@ -7785,10 +7335,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
v.reset(Op386MOVSSstoreidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem)
@@ -7814,10 +7361,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
v.reset(Op386MOVSSstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -7845,10 +7389,7 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool {
v.reset(Op386MOVSSstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
@@ -7867,10 +7408,7 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool {
v.reset(Op386MOVSSstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -7896,10 +7434,7 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool {
v.reset(Op386MOVSSstoreidx4)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
@@ -7918,10 +7453,7 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool {
v.reset(Op386MOVSSstoreidx4)
v.AuxInt = int64(int32(c + 4*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -7946,12 +7478,10 @@ func rewriteValue386_Op386MOVWLSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWLSX (ANDLconst [c] x))
@@ -7990,9 +7520,8 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -8019,8 +7548,7 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
v.reset(Op386MOVWLSXload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -8045,12 +7573,10 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
@@ -8071,13 +7597,10 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
@@ -8098,13 +7621,10 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, Op386MOVWloadidx2, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWLZX (ANDLconst [c] x))
@@ -8139,9 +7659,8 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -8167,8 +7686,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
v.reset(Op386MOVWload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -8190,8 +7708,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
v.reset(Op386MOVWload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
@@ -8214,9 +7731,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
v.reset(Op386MOVWloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem)
@@ -8239,9 +7754,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
v.reset(Op386MOVWloadidx2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [off] {sym} (ADDL ptr idx) mem)
@@ -8266,9 +7779,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
v.reset(Op386MOVWloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -8307,9 +7818,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool {
v.reset(Op386MOVWloadidx2)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -8330,9 +7839,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool {
v.reset(Op386MOVWloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -8353,9 +7860,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool {
v.reset(Op386MOVWloadidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -8381,9 +7886,7 @@ func rewriteValue386_Op386MOVWloadidx2(v *Value) bool {
v.reset(Op386MOVWloadidx2)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
@@ -8401,9 +7904,7 @@ func rewriteValue386_Op386MOVWloadidx2(v *Value) bool {
v.reset(Op386MOVWloadidx2)
v.AuxInt = int64(int32(c + 2*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -8428,9 +7929,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
@@ -8447,9 +7946,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
@@ -8471,9 +7968,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
@@ -8494,8 +7989,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstoreconst)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -8518,9 +8012,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
@@ -8544,10 +8036,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem)
@@ -8571,10 +8060,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstoreidx2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem)
@@ -8600,10 +8086,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -8630,9 +8113,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
@@ -8662,9 +8143,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
return false
@@ -8692,8 +8171,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -8715,8 +8193,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
@@ -8739,9 +8216,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem)
@@ -8764,9 +8239,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem)
@@ -8783,9 +8256,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVWstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
@@ -8810,8 +8281,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
@@ -8836,8 +8306,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -8860,9 +8329,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool {
v.reset(Op386MOVWstoreconstidx2)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
@@ -8880,9 +8347,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool {
v.reset(Op386MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
@@ -8900,9 +8365,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool {
v.reset(Op386MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
@@ -8928,9 +8391,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool {
v.reset(Op386MOVLstoreconstidx1)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(i)
- v.AddArg(mem)
+ v.AddArg3(p, i, mem)
return true
}
return false
@@ -8955,9 +8416,7 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool {
v.reset(Op386MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem)
@@ -8975,9 +8434,7 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool {
v.reset(Op386MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(2 * c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
@@ -9003,12 +8460,10 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool {
v.reset(Op386MOVLstoreconstidx1)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, i.Type)
v0.AuxInt = 1
v0.AddArg(i)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
return false
@@ -9034,10 +8489,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx2)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -9059,10 +8511,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -9084,10 +8533,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
v.reset(Op386MOVWstoreidx1)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -9119,10 +8565,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -9160,10 +8603,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -9193,10 +8633,7 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool {
v.reset(Op386MOVWstoreidx2)
v.AuxInt = int64(int32(c + d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
@@ -9215,10 +8652,7 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool {
v.reset(Op386MOVWstoreidx2)
v.AuxInt = int64(int32(c + 2*d))
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
@@ -9244,13 +8678,10 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type)
v0.AuxInt = 1
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w, mem)
return true
}
// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
@@ -9281,13 +8712,10 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool {
v.reset(Op386MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type)
v0.AuxInt = 1
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w0, mem)
return true
}
return false
@@ -9331,9 +8759,7 @@ func rewriteValue386_Op386MULL(v *Value) bool {
v.reset(Op386MULLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -9359,10 +8785,7 @@ func rewriteValue386_Op386MULL(v *Value) bool {
v.reset(Op386MULLloadidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(x, ptr, idx, mem)
return true
}
break
@@ -9395,8 +8818,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
x := v_0
v.reset(Op386NEGL)
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -9409,8 +8831,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
x := v_0
v.reset(Op386NEGL)
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -9423,8 +8844,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
x := v_0
v.reset(Op386NEGL)
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -9456,9 +8876,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MULLconst [3] x)
@@ -9469,8 +8887,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL2)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULLconst [5] x)
@@ -9481,8 +8898,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL4)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULLconst [7] x)
@@ -9493,11 +8909,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [9] x)
@@ -9508,8 +8922,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL8)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULLconst [11] x)
@@ -9520,11 +8933,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [13] x)
@@ -9535,11 +8946,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [19] x)
@@ -9550,11 +8959,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [21] x)
@@ -9565,11 +8972,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [25] x)
@@ -9580,11 +8985,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [27] x)
@@ -9596,13 +8999,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
x := v_0
v.reset(Op386LEAL8)
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULLconst [37] x)
@@ -9613,11 +9013,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [41] x)
@@ -9628,11 +9026,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [45] x)
@@ -9644,13 +9040,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
x := v_0
v.reset(Op386LEAL8)
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULLconst [73] x)
@@ -9661,11 +9054,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
}
x := v_0
v.reset(Op386LEAL8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [81] x)
@@ -9677,13 +9068,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
x := v_0
v.reset(Op386LEAL8)
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULLconst [c] x)
@@ -9699,8 +9087,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
v0.AuxInt = log2(c + 1)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -9716,8 +9103,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
v0.AuxInt = log2(c - 1)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -9733,8 +9119,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
v0.AuxInt = log2(c - 2)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -9750,8 +9135,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
v0.AuxInt = log2(c - 4)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -9767,8 +9151,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
v0.AuxInt = log2(c - 8)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -9783,8 +9166,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v.reset(Op386SHLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -9800,8 +9182,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v.reset(Op386SHLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -9817,8 +9198,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v.reset(Op386SHLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -9861,9 +9241,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool {
v.reset(Op386MULLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -9886,9 +9264,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool {
v.reset(Op386MULLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -9912,10 +9288,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool {
v.reset(Op386MULLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, ptr, idx, mem)
return true
}
return false
@@ -9947,10 +9320,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool {
v.reset(Op386MULLloadidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (MULLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
@@ -9973,10 +9343,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool {
v.reset(Op386MULLloadidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (MULLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
@@ -10000,10 +9367,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool {
v.reset(Op386MULLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
return false
@@ -10033,9 +9397,7 @@ func rewriteValue386_Op386MULSD(v *Value) bool {
v.reset(Op386MULSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -10067,9 +9429,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool {
v.reset(Op386MULSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -10092,9 +9452,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool {
v.reset(Op386MULSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -10124,9 +9482,7 @@ func rewriteValue386_Op386MULSS(v *Value) bool {
v.reset(Op386MULSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -10158,9 +9514,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool {
v.reset(Op386MULSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -10183,9 +9537,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool {
v.reset(Op386MULSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -10335,9 +9687,7 @@ func rewriteValue386_Op386ORL(v *Value) bool {
v.reset(Op386ORLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -10363,10 +9713,7 @@ func rewriteValue386_Op386ORL(v *Value) bool {
v.reset(Op386ORLloadidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(x, ptr, idx, mem)
return true
}
break
@@ -10378,9 +9725,7 @@ func rewriteValue386_Op386ORL(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORL x0:(MOVBload [i0] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
@@ -10414,12 +9759,10 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
break
@@ -10479,12 +9822,10 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
}
@@ -10528,13 +9869,10 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -10606,13 +9944,10 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -10634,9 +9969,7 @@ func rewriteValue386_Op386ORLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORLconst [c] _)
@@ -10688,8 +10021,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
v.reset(Op386ORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -10711,8 +10043,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
v.reset(Op386ORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -10742,9 +10073,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool {
v.reset(Op386ORLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (ORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
@@ -10766,9 +10095,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool {
v.reset(Op386ORLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (ORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
@@ -10791,9 +10118,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool {
v.reset(Op386ORLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
return false
@@ -10823,9 +10148,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool {
v.reset(Op386ORLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -10848,9 +10171,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool {
v.reset(Op386ORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -10874,10 +10195,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool {
v.reset(Op386ORLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, ptr, idx, mem)
return true
}
return false
@@ -10909,10 +10227,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool {
v.reset(Op386ORLloadidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (ORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
@@ -10935,10 +10250,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool {
v.reset(Op386ORLloadidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (ORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
@@ -10962,10 +10274,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool {
v.reset(Op386ORLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
return false
@@ -10995,9 +10304,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool {
v.reset(Op386ORLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -11020,9 +10327,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool {
v.reset(Op386ORLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -11054,10 +10359,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool {
v.reset(Op386ORLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
@@ -11080,10 +10382,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool {
v.reset(Op386ORLmodifyidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
@@ -11107,10 +10406,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool {
v.reset(Op386ORLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (ORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
@@ -11132,9 +10428,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool {
v.reset(Op386ORLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -11162,9 +10456,7 @@ func rewriteValue386_Op386ROLBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -11192,9 +10484,7 @@ func rewriteValue386_Op386ROLLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -11222,9 +10512,7 @@ func rewriteValue386_Op386ROLWconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -11256,9 +10544,7 @@ func rewriteValue386_Op386SARBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARBconst [c] (MOVLconst [d]))
@@ -11300,8 +10586,7 @@ func rewriteValue386_Op386SARL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -11315,9 +10600,7 @@ func rewriteValue386_Op386SARLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARLconst [c] (MOVLconst [d]))
@@ -11361,9 +10644,7 @@ func rewriteValue386_Op386SARWconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARWconst [c] (MOVLconst [d]))
@@ -11395,8 +10676,7 @@ func rewriteValue386_Op386SBBL(v *Value) bool {
f := v_2
v.reset(Op386SBBLconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(f)
+ v.AddArg2(x, f)
return true
}
return false
@@ -12130,8 +11410,7 @@ func rewriteValue386_Op386SHLL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12145,9 +11424,7 @@ func rewriteValue386_Op386SHLLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -12198,9 +11475,7 @@ func rewriteValue386_Op386SHRBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -12230,8 +11505,7 @@ func rewriteValue386_Op386SHRL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(Op386SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12245,9 +11519,7 @@ func rewriteValue386_Op386SHRLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -12298,9 +11570,7 @@ func rewriteValue386_Op386SHRWconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -12356,9 +11626,7 @@ func rewriteValue386_Op386SUBL(v *Value) bool {
v.reset(Op386SUBLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
// match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
@@ -12381,10 +11649,7 @@ func rewriteValue386_Op386SUBL(v *Value) bool {
v.reset(Op386SUBLloadidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(x, ptr, idx, mem)
return true
}
// match: (SUBL x x)
@@ -12429,9 +11694,7 @@ func rewriteValue386_Op386SUBLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SUBLconst [c] x)
@@ -12470,9 +11733,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool {
v.reset(Op386SUBLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -12495,9 +11756,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool {
v.reset(Op386SUBLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -12521,10 +11780,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool {
v.reset(Op386SUBLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, ptr, idx, mem)
return true
}
return false
@@ -12556,10 +11812,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool {
v.reset(Op386SUBLloadidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (SUBLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
@@ -12582,10 +11835,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool {
v.reset(Op386SUBLloadidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (SUBLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
@@ -12609,10 +11859,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool {
v.reset(Op386SUBLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
return false
@@ -12642,9 +11889,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool {
v.reset(Op386SUBLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -12667,9 +11912,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool {
v.reset(Op386SUBLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -12701,10 +11944,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool {
v.reset(Op386SUBLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (SUBLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
@@ -12727,10 +11967,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool {
v.reset(Op386SUBLmodifyidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (SUBLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
@@ -12754,10 +11991,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool {
v.reset(Op386SUBLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
@@ -12779,9 +12013,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool {
v.reset(Op386ADDLconstmodifyidx4)
v.AuxInt = makeValAndOff(-c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -12810,9 +12042,7 @@ func rewriteValue386_Op386SUBSD(v *Value) bool {
v.reset(Op386SUBSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -12842,9 +12072,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool {
v.reset(Op386SUBSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -12867,9 +12095,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool {
v.reset(Op386SUBSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -12898,9 +12124,7 @@ func rewriteValue386_Op386SUBSS(v *Value) bool {
v.reset(Op386SUBSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -12930,9 +12154,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool {
v.reset(Op386SUBSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -12955,9 +12177,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool {
v.reset(Op386SUBSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -13075,9 +12295,7 @@ func rewriteValue386_Op386XORL(v *Value) bool {
v.reset(Op386XORLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -13103,10 +12321,7 @@ func rewriteValue386_Op386XORL(v *Value) bool {
v.reset(Op386XORLloadidx4)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(x, ptr, idx, mem)
return true
}
break
@@ -13149,9 +12364,7 @@ func rewriteValue386_Op386XORLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (XORLconst [c] (MOVLconst [d]))
@@ -13191,8 +12404,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
v.reset(Op386XORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
@@ -13214,8 +12426,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
v.reset(Op386XORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -13245,9 +12456,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool {
v.reset(Op386XORLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (XORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
@@ -13269,9 +12478,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool {
v.reset(Op386XORLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
// match: (XORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
@@ -13294,9 +12501,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool {
v.reset(Op386XORLconstmodifyidx4)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(base, idx, mem)
return true
}
return false
@@ -13326,9 +12531,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool {
v.reset(Op386XORLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
@@ -13351,9 +12554,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool {
v.reset(Op386XORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (XORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
@@ -13377,10 +12578,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool {
v.reset(Op386XORLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, ptr, idx, mem)
return true
}
return false
@@ -13412,10 +12610,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool {
v.reset(Op386XORLloadidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (XORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
@@ -13438,10 +12633,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool {
v.reset(Op386XORLloadidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
// match: (XORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
@@ -13465,10 +12657,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool {
v.reset(Op386XORLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg4(val, base, idx, mem)
return true
}
return false
@@ -13498,9 +12687,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool {
v.reset(Op386XORLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -13523,9 +12710,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool {
v.reset(Op386XORLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -13557,10 +12742,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool {
v.reset(Op386XORLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (XORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
@@ -13583,10 +12765,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool {
v.reset(Op386XORLmodifyidx4)
v.AuxInt = off1 + off2*4
v.Aux = sym
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (XORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
@@ -13610,10 +12789,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool {
v.reset(Op386XORLmodifyidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(base, idx, val, mem)
return true
}
// match: (XORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
@@ -13635,9 +12811,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool {
v.reset(Op386XORLconstmodifyidx4)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -13680,10 +12854,9 @@ func rewriteValue386_OpDiv8(v *Value) bool {
v.reset(Op386DIVW)
v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -13700,10 +12873,9 @@ func rewriteValue386_OpDiv8u(v *Value) bool {
v.reset(Op386DIVWU)
v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -13718,8 +12890,7 @@ func rewriteValue386_OpEq16(v *Value) bool {
y := v_1
v.reset(Op386SETEQ)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13735,8 +12906,7 @@ func rewriteValue386_OpEq32(v *Value) bool {
y := v_1
v.reset(Op386SETEQ)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13752,8 +12922,7 @@ func rewriteValue386_OpEq32F(v *Value) bool {
y := v_1
v.reset(Op386SETEQF)
v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13769,8 +12938,7 @@ func rewriteValue386_OpEq64F(v *Value) bool {
y := v_1
v.reset(Op386SETEQF)
v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13786,8 +12954,7 @@ func rewriteValue386_OpEq8(v *Value) bool {
y := v_1
v.reset(Op386SETEQ)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13803,8 +12970,7 @@ func rewriteValue386_OpEqB(v *Value) bool {
y := v_1
v.reset(Op386SETEQ)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13820,8 +12986,7 @@ func rewriteValue386_OpEqPtr(v *Value) bool {
y := v_1
v.reset(Op386SETEQ)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13837,8 +13002,7 @@ func rewriteValue386_OpGeq32F(v *Value) bool {
y := v_1
v.reset(Op386SETGEF)
v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13854,8 +13018,7 @@ func rewriteValue386_OpGeq64F(v *Value) bool {
y := v_1
v.reset(Op386SETGEF)
v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13871,8 +13034,7 @@ func rewriteValue386_OpGreater32F(v *Value) bool {
y := v_1
v.reset(Op386SETGF)
v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13888,8 +13050,7 @@ func rewriteValue386_OpGreater64F(v *Value) bool {
y := v_1
v.reset(Op386SETGF)
v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13905,8 +13066,7 @@ func rewriteValue386_OpIsInBounds(v *Value) bool {
len := v_1
v.reset(Op386SETB)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
+ v0.AddArg2(idx, len)
v.AddArg(v0)
return true
}
@@ -13920,8 +13080,7 @@ func rewriteValue386_OpIsNonNil(v *Value) bool {
p := v_0
v.reset(Op386SETNE)
v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
+ v0.AddArg2(p, p)
v.AddArg(v0)
return true
}
@@ -13937,8 +13096,7 @@ func rewriteValue386_OpIsSliceInBounds(v *Value) bool {
len := v_1
v.reset(Op386SETBE)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
+ v0.AddArg2(idx, len)
v.AddArg(v0)
return true
}
@@ -13954,8 +13112,7 @@ func rewriteValue386_OpLeq16(v *Value) bool {
y := v_1
v.reset(Op386SETLE)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13971,8 +13128,7 @@ func rewriteValue386_OpLeq16U(v *Value) bool {
y := v_1
v.reset(Op386SETBE)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13988,8 +13144,7 @@ func rewriteValue386_OpLeq32(v *Value) bool {
y := v_1
v.reset(Op386SETLE)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14005,8 +13160,7 @@ func rewriteValue386_OpLeq32F(v *Value) bool {
y := v_1
v.reset(Op386SETGEF)
v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -14022,8 +13176,7 @@ func rewriteValue386_OpLeq32U(v *Value) bool {
y := v_1
v.reset(Op386SETBE)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14039,8 +13192,7 @@ func rewriteValue386_OpLeq64F(v *Value) bool {
y := v_1
v.reset(Op386SETGEF)
v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -14056,8 +13208,7 @@ func rewriteValue386_OpLeq8(v *Value) bool {
y := v_1
v.reset(Op386SETLE)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14073,8 +13224,7 @@ func rewriteValue386_OpLeq8U(v *Value) bool {
y := v_1
v.reset(Op386SETBE)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14090,8 +13240,7 @@ func rewriteValue386_OpLess16(v *Value) bool {
y := v_1
v.reset(Op386SETL)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14107,8 +13256,7 @@ func rewriteValue386_OpLess16U(v *Value) bool {
y := v_1
v.reset(Op386SETB)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14124,8 +13272,7 @@ func rewriteValue386_OpLess32(v *Value) bool {
y := v_1
v.reset(Op386SETL)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14141,8 +13288,7 @@ func rewriteValue386_OpLess32F(v *Value) bool {
y := v_1
v.reset(Op386SETGF)
v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -14158,8 +13304,7 @@ func rewriteValue386_OpLess32U(v *Value) bool {
y := v_1
v.reset(Op386SETB)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14175,8 +13320,7 @@ func rewriteValue386_OpLess64F(v *Value) bool {
y := v_1
v.reset(Op386SETGF)
v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -14192,8 +13336,7 @@ func rewriteValue386_OpLess8(v *Value) bool {
y := v_1
v.reset(Op386SETL)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14209,8 +13352,7 @@ func rewriteValue386_OpLess8U(v *Value) bool {
y := v_1
v.reset(Op386SETB)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14229,8 +13371,7 @@ func rewriteValue386_OpLoad(v *Value) bool {
break
}
v.reset(Op386MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -14244,8 +13385,7 @@ func rewriteValue386_OpLoad(v *Value) bool {
break
}
v.reset(Op386MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -14259,8 +13399,7 @@ func rewriteValue386_OpLoad(v *Value) bool {
break
}
v.reset(Op386MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -14274,8 +13413,7 @@ func rewriteValue386_OpLoad(v *Value) bool {
break
}
v.reset(Op386MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -14289,8 +13427,7 @@ func rewriteValue386_OpLoad(v *Value) bool {
break
}
v.reset(Op386MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -14324,15 +13461,13 @@ func rewriteValue386_OpLsh16x16(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x16 x y)
@@ -14347,8 +13482,7 @@ func rewriteValue386_OpLsh16x16(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14369,15 +13503,13 @@ func rewriteValue386_OpLsh16x32(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x32 x y)
@@ -14392,8 +13524,7 @@ func rewriteValue386_OpLsh16x32(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14451,15 +13582,13 @@ func rewriteValue386_OpLsh16x8(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x8 x y)
@@ -14474,8 +13603,7 @@ func rewriteValue386_OpLsh16x8(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14496,15 +13624,13 @@ func rewriteValue386_OpLsh32x16(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x16 x y)
@@ -14519,8 +13645,7 @@ func rewriteValue386_OpLsh32x16(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14541,15 +13666,13 @@ func rewriteValue386_OpLsh32x32(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x32 x y)
@@ -14564,8 +13687,7 @@ func rewriteValue386_OpLsh32x32(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14623,15 +13745,13 @@ func rewriteValue386_OpLsh32x8(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x8 x y)
@@ -14646,8 +13766,7 @@ func rewriteValue386_OpLsh32x8(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14668,15 +13787,13 @@ func rewriteValue386_OpLsh8x16(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x16 x y)
@@ -14691,8 +13808,7 @@ func rewriteValue386_OpLsh8x16(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14713,15 +13829,13 @@ func rewriteValue386_OpLsh8x32(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x32 x y)
@@ -14736,8 +13850,7 @@ func rewriteValue386_OpLsh8x32(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14795,15 +13908,13 @@ func rewriteValue386_OpLsh8x8(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x8 x y)
@@ -14818,8 +13929,7 @@ func rewriteValue386_OpLsh8x8(v *Value) bool {
}
v.reset(Op386SHLL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14837,10 +13947,9 @@ func rewriteValue386_OpMod8(v *Value) bool {
v.reset(Op386MODW)
v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -14857,10 +13966,9 @@ func rewriteValue386_OpMod8u(v *Value) bool {
v.reset(Op386MODWU)
v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -14878,9 +13986,7 @@ func rewriteValue386_OpMove(v *Value) bool {
break
}
mem := v_2
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
+ v.copyOf(mem)
return true
}
// match: (Move [1] dst src mem)
@@ -14893,12 +13999,9 @@ func rewriteValue386_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(Op386MOVBstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [2] dst src mem)
@@ -14911,12 +14014,9 @@ func rewriteValue386_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(Op386MOVWstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [4] dst src mem)
@@ -14929,12 +14029,9 @@ func rewriteValue386_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(Op386MOVLstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [3] dst src mem)
@@ -14948,20 +14045,14 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v_2
v.reset(Op386MOVBstore)
v.AuxInt = 2
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [5] dst src mem)
@@ -14975,20 +14066,14 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v_2
v.reset(Op386MOVBstore)
v.AuxInt = 4
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [6] dst src mem)
@@ -15002,20 +14087,14 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v_2
v.reset(Op386MOVWstore)
v.AuxInt = 4
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [7] dst src mem)
@@ -15029,20 +14108,14 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v_2
v.reset(Op386MOVLstore)
v.AuxInt = 3
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [8] dst src mem)
@@ -15056,20 +14129,14 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v_2
v.reset(Op386MOVLstore)
v.AuxInt = 4
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [s] dst src mem)
@@ -15088,19 +14155,14 @@ func rewriteValue386_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type)
v0.AuxInt = s % 4
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386ADDLconst, src.Type)
v1.AuxInt = s % 4
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
- v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
@@ -15116,9 +14178,7 @@ func rewriteValue386_OpMove(v *Value) bool {
}
v.reset(Op386DUFFCOPY)
v.AuxInt = 10 * (128 - s/4)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
+ v.AddArg3(dst, src, mem)
return true
}
// match: (Move [s] dst src mem)
@@ -15133,12 +14193,9 @@ func rewriteValue386_OpMove(v *Value) bool {
break
}
v.reset(Op386REPMOVSL)
- v.AddArg(dst)
- v.AddArg(src)
v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v0.AuxInt = s / 4
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg4(dst, src, v0, mem)
return true
}
return false
@@ -15157,10 +14214,9 @@ func rewriteValue386_OpNeg32F(v *Value) bool {
break
}
v.reset(Op386PXOR)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (Neg32F x)
@@ -15191,10 +14247,9 @@ func rewriteValue386_OpNeg64F(v *Value) bool {
break
}
v.reset(Op386PXOR)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (Neg64F x)
@@ -15222,8 +14277,7 @@ func rewriteValue386_OpNeq16(v *Value) bool {
y := v_1
v.reset(Op386SETNE)
v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15239,8 +14293,7 @@ func rewriteValue386_OpNeq32(v *Value) bool {
y := v_1
v.reset(Op386SETNE)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15256,8 +14309,7 @@ func rewriteValue386_OpNeq32F(v *Value) bool {
y := v_1
v.reset(Op386SETNEF)
v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15273,8 +14325,7 @@ func rewriteValue386_OpNeq64F(v *Value) bool {
y := v_1
v.reset(Op386SETNEF)
v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15290,8 +14341,7 @@ func rewriteValue386_OpNeq8(v *Value) bool {
y := v_1
v.reset(Op386SETNE)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15307,8 +14357,7 @@ func rewriteValue386_OpNeqB(v *Value) bool {
y := v_1
v.reset(Op386SETNE)
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15324,8 +14373,7 @@ func rewriteValue386_OpNeqPtr(v *Value) bool {
y := v_1
v.reset(Op386SETNE)
v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15359,9 +14407,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool {
}
v.reset(Op386LoweredPanicBoundsA)
v.AuxInt = kind
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg3(x, y, mem)
return true
}
// match: (PanicBounds [kind] x y mem)
@@ -15377,9 +14423,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool {
}
v.reset(Op386LoweredPanicBoundsB)
v.AuxInt = kind
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg3(x, y, mem)
return true
}
// match: (PanicBounds [kind] x y mem)
@@ -15395,9 +14439,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool {
}
v.reset(Op386LoweredPanicBoundsC)
v.AuxInt = kind
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg3(x, y, mem)
return true
}
return false
@@ -15421,10 +14463,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool {
}
v.reset(Op386LoweredPanicExtendA)
v.AuxInt = kind
- v.AddArg(hi)
- v.AddArg(lo)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg4(hi, lo, y, mem)
return true
}
// match: (PanicExtend [kind] hi lo y mem)
@@ -15441,10 +14480,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool {
}
v.reset(Op386LoweredPanicExtendB)
v.AuxInt = kind
- v.AddArg(hi)
- v.AddArg(lo)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg4(hi, lo, y, mem)
return true
}
// match: (PanicExtend [kind] hi lo y mem)
@@ -15461,10 +14497,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool {
}
v.reset(Op386LoweredPanicExtendC)
v.AuxInt = kind
- v.AddArg(hi)
- v.AddArg(lo)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg4(hi, lo, y, mem)
return true
}
return false
@@ -15539,15 +14572,13 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux16 x y)
@@ -15562,8 +14593,7 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool {
}
v.reset(Op386SHRW)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15584,15 +14614,13 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux32 x y)
@@ -15607,8 +14635,7 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool {
}
v.reset(Op386SHRW)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15666,15 +14693,13 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux8 x y)
@@ -15689,8 +14714,7 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool {
}
v.reset(Op386SHRW)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15711,9 +14735,7 @@ func rewriteValue386_OpRsh16x16(v *Value) bool {
}
v.reset(Op386SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
@@ -15721,8 +14743,8 @@ func rewriteValue386_OpRsh16x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x16 x y)
@@ -15735,8 +14757,7 @@ func rewriteValue386_OpRsh16x16(v *Value) bool {
break
}
v.reset(Op386SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15757,9 +14778,7 @@ func rewriteValue386_OpRsh16x32(v *Value) bool {
}
v.reset(Op386SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
@@ -15767,8 +14786,8 @@ func rewriteValue386_OpRsh16x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x32 x y)
@@ -15781,8 +14800,7 @@ func rewriteValue386_OpRsh16x32(v *Value) bool {
break
}
v.reset(Op386SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15842,9 +14860,7 @@ func rewriteValue386_OpRsh16x8(v *Value) bool {
}
v.reset(Op386SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
@@ -15852,8 +14868,8 @@ func rewriteValue386_OpRsh16x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x8 x y)
@@ -15866,8 +14882,7 @@ func rewriteValue386_OpRsh16x8(v *Value) bool {
break
}
v.reset(Op386SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15888,15 +14903,13 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux16 x y)
@@ -15911,8 +14924,7 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool {
}
v.reset(Op386SHRL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -15933,15 +14945,13 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux32 x y)
@@ -15956,8 +14966,7 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool {
}
v.reset(Op386SHRL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16015,15 +15024,13 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux8 x y)
@@ -16038,8 +15045,7 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool {
}
v.reset(Op386SHRL)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16060,9 +15066,7 @@ func rewriteValue386_OpRsh32x16(v *Value) bool {
}
v.reset(Op386SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
@@ -16070,8 +15074,8 @@ func rewriteValue386_OpRsh32x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x16 x y)
@@ -16084,8 +15088,7 @@ func rewriteValue386_OpRsh32x16(v *Value) bool {
break
}
v.reset(Op386SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16106,9 +15109,7 @@ func rewriteValue386_OpRsh32x32(v *Value) bool {
}
v.reset(Op386SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
@@ -16116,8 +15117,8 @@ func rewriteValue386_OpRsh32x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x32 x y)
@@ -16130,8 +15131,7 @@ func rewriteValue386_OpRsh32x32(v *Value) bool {
break
}
v.reset(Op386SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16191,9 +15191,7 @@ func rewriteValue386_OpRsh32x8(v *Value) bool {
}
v.reset(Op386SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
@@ -16201,8 +15199,8 @@ func rewriteValue386_OpRsh32x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x8 x y)
@@ -16215,8 +15213,7 @@ func rewriteValue386_OpRsh32x8(v *Value) bool {
break
}
v.reset(Op386SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16237,15 +15234,13 @@ func rewriteValue386_OpRsh8Ux16(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux16 x y)
@@ -16260,8 +15255,7 @@ func rewriteValue386_OpRsh8Ux16(v *Value) bool {
}
v.reset(Op386SHRB)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16282,15 +15276,13 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux32 x y)
@@ -16305,8 +15297,7 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool {
}
v.reset(Op386SHRB)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16364,15 +15355,13 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool {
}
v.reset(Op386ANDL)
v0 := b.NewValue0(v.Pos, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux8 x y)
@@ -16387,8 +15376,7 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool {
}
v.reset(Op386SHRB)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16409,9 +15397,7 @@ func rewriteValue386_OpRsh8x16(v *Value) bool {
}
v.reset(Op386SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
@@ -16419,8 +15405,8 @@ func rewriteValue386_OpRsh8x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x16 x y)
@@ -16433,8 +15419,7 @@ func rewriteValue386_OpRsh8x16(v *Value) bool {
break
}
v.reset(Op386SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16455,9 +15440,7 @@ func rewriteValue386_OpRsh8x32(v *Value) bool {
}
v.reset(Op386SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
@@ -16465,8 +15448,8 @@ func rewriteValue386_OpRsh8x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x32 x y)
@@ -16479,8 +15462,7 @@ func rewriteValue386_OpRsh8x32(v *Value) bool {
break
}
v.reset(Op386SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16540,9 +15522,7 @@ func rewriteValue386_OpRsh8x8(v *Value) bool {
}
v.reset(Op386SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
@@ -16550,8 +15530,8 @@ func rewriteValue386_OpRsh8x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x8 x y)
@@ -16564,8 +15544,7 @@ func rewriteValue386_OpRsh8x8(v *Value) bool {
break
}
v.reset(Op386SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -16585,8 +15564,7 @@ func rewriteValue386_OpSelect0(v *Value) bool {
v.reset(OpSelect0)
v.Type = typ.UInt32
v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -16607,8 +15585,7 @@ func rewriteValue386_OpSelect1(v *Value) bool {
v.reset(Op386SETO)
v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -16659,9 +15636,7 @@ func rewriteValue386_OpStore(v *Value) bool {
break
}
v.reset(Op386MOVSDstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -16676,9 +15651,7 @@ func rewriteValue386_OpStore(v *Value) bool {
break
}
v.reset(Op386MOVSSstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -16693,9 +15666,7 @@ func rewriteValue386_OpStore(v *Value) bool {
break
}
v.reset(Op386MOVLstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -16710,9 +15681,7 @@ func rewriteValue386_OpStore(v *Value) bool {
break
}
v.reset(Op386MOVWstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -16727,9 +15696,7 @@ func rewriteValue386_OpStore(v *Value) bool {
break
}
v.reset(Op386MOVBstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -16747,9 +15714,7 @@ func rewriteValue386_OpZero(v *Value) bool {
break
}
mem := v_1
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
+ v.copyOf(mem)
return true
}
// match: (Zero [1] destptr mem)
@@ -16762,8 +15727,7 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVBstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [2] destptr mem)
@@ -16776,8 +15740,7 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVWstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [4] destptr mem)
@@ -16790,8 +15753,7 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVLstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [3] destptr mem)
@@ -16804,12 +15766,10 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [5] destptr mem)
@@ -16822,12 +15782,10 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [6] destptr mem)
@@ -16840,12 +15798,10 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVWstoreconst)
v.AuxInt = makeValAndOff(0, 4)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [7] destptr mem)
@@ -16858,12 +15814,10 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 3)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [s] destptr mem)
@@ -16881,12 +15835,10 @@ func rewriteValue386_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32)
v0.AuxInt = s % 4
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v1.AuxInt = 0
- v1.AddArg(destptr)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
return true
}
// match: (Zero [8] destptr mem)
@@ -16899,12 +15851,10 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 4)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [12] destptr mem)
@@ -16917,16 +15867,13 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 8)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 4)
- v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v1.AuxInt = 0
- v1.AddArg(destptr)
- v1.AddArg(mem)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [16] destptr mem)
@@ -16939,20 +15886,16 @@ func rewriteValue386_OpZero(v *Value) bool {
mem := v_1
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 12)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 8)
- v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v1.AuxInt = makeValAndOff(0, 4)
- v1.AddArg(destptr)
v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v2.AuxInt = 0
- v2.AddArg(destptr)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [s] destptr mem)
@@ -16967,11 +15910,9 @@ func rewriteValue386_OpZero(v *Value) bool {
}
v.reset(Op386DUFFZERO)
v.AuxInt = 1 * (128 - s/4)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(destptr, v0, mem)
return true
}
// match: (Zero [s] destptr mem)
@@ -16985,14 +15926,11 @@ func rewriteValue386_OpZero(v *Value) bool {
break
}
v.reset(Op386REPSTOSL)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v0.AuxInt = s / 4
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v1.AuxInt = 0
- v.AddArg(v1)
- v.AddArg(mem)
+ v.AddArg4(destptr, v0, v1, mem)
return true
}
return false
@@ -17024,8 +15962,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386EQ)
- b.AddControl(cmp)
+ b.resetWithControl(Block386EQ, cmp)
return true
}
// match: (EQ (FlagEQ) yes no)
@@ -17068,8 +16005,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386LE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386LE, cmp)
return true
}
// match: (GE (FlagEQ) yes no)
@@ -17110,8 +16046,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386LT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386LT, cmp)
return true
}
// match: (GT (FlagEQ) yes no)
@@ -17153,8 +16088,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETL {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386LT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386LT, cmp)
return true
}
// match: (If (SETLE cmp) yes no)
@@ -17162,8 +16096,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETLE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386LE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386LE, cmp)
return true
}
// match: (If (SETG cmp) yes no)
@@ -17171,8 +16104,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETG {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386GT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386GT, cmp)
return true
}
// match: (If (SETGE cmp) yes no)
@@ -17180,8 +16112,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETGE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386GE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386GE, cmp)
return true
}
// match: (If (SETEQ cmp) yes no)
@@ -17189,8 +16120,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETEQ {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386EQ)
- b.AddControl(cmp)
+ b.resetWithControl(Block386EQ, cmp)
return true
}
// match: (If (SETNE cmp) yes no)
@@ -17198,8 +16128,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETNE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386NE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386NE, cmp)
return true
}
// match: (If (SETB cmp) yes no)
@@ -17207,8 +16136,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETB {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386ULT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386ULT, cmp)
return true
}
// match: (If (SETBE cmp) yes no)
@@ -17216,8 +16144,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETBE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386ULE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386ULE, cmp)
return true
}
// match: (If (SETA cmp) yes no)
@@ -17225,8 +16152,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETA {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386UGT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGT, cmp)
return true
}
// match: (If (SETAE cmp) yes no)
@@ -17234,8 +16160,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETAE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386UGE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGE, cmp)
return true
}
// match: (If (SETO cmp) yes no)
@@ -17243,8 +16168,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETO {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386OS)
- b.AddControl(cmp)
+ b.resetWithControl(Block386OS, cmp)
return true
}
// match: (If (SETGF cmp) yes no)
@@ -17252,8 +16176,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETGF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386UGT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGT, cmp)
return true
}
// match: (If (SETGEF cmp) yes no)
@@ -17261,8 +16184,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETGEF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386UGE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGE, cmp)
return true
}
// match: (If (SETEQF cmp) yes no)
@@ -17270,8 +16192,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETEQF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386EQF)
- b.AddControl(cmp)
+ b.resetWithControl(Block386EQF, cmp)
return true
}
// match: (If (SETNEF cmp) yes no)
@@ -17279,19 +16200,16 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386SETNEF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386NEF)
- b.AddControl(cmp)
+ b.resetWithControl(Block386NEF, cmp)
return true
}
// match: (If cond yes no)
// result: (NE (TESTB cond cond) yes no)
for {
cond := b.Controls[0]
- b.Reset(Block386NE)
v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags)
- v0.AddArg(cond)
- v0.AddArg(cond)
- b.AddControl(v0)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(Block386NE, v0)
return true
}
case Block386LE:
@@ -17300,8 +16218,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386GE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386GE, cmp)
return true
}
// match: (LE (FlagEQ) yes no)
@@ -17342,8 +16259,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386GT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386GT, cmp)
return true
}
// match: (LT (FlagEQ) yes no)
@@ -17394,8 +16310,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386LT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386LT, cmp)
return true
}
// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
@@ -17412,8 +16327,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386LE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386LE, cmp)
return true
}
// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
@@ -17430,8 +16344,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386GT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386GT, cmp)
return true
}
// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
@@ -17448,8 +16361,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386GE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386GE, cmp)
return true
}
// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
@@ -17466,8 +16378,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386EQ)
- b.AddControl(cmp)
+ b.resetWithControl(Block386EQ, cmp)
return true
}
// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
@@ -17484,8 +16395,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386NE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386NE, cmp)
return true
}
// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
@@ -17502,8 +16412,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386ULT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386ULT, cmp)
return true
}
// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
@@ -17520,8 +16429,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386ULE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386ULE, cmp)
return true
}
// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
@@ -17538,8 +16446,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386UGT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGT, cmp)
return true
}
// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
@@ -17556,8 +16463,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386UGE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGE, cmp)
return true
}
// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
@@ -17574,8 +16480,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386OS)
- b.AddControl(cmp)
+ b.resetWithControl(Block386OS, cmp)
return true
}
// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
@@ -17592,8 +16497,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386UGT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGT, cmp)
return true
}
// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
@@ -17610,8 +16514,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386UGE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGE, cmp)
return true
}
// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
@@ -17628,8 +16531,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386EQF)
- b.AddControl(cmp)
+ b.resetWithControl(Block386EQF, cmp)
return true
}
// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
@@ -17646,8 +16548,7 @@ func rewriteBlock386(b *Block) bool {
if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(Block386NEF)
- b.AddControl(cmp)
+ b.resetWithControl(Block386NEF, cmp)
return true
}
// match: (NE (InvertFlags cmp) yes no)
@@ -17655,8 +16556,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386NE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386NE, cmp)
return true
}
// match: (NE (FlagEQ) yes no)
@@ -17696,8 +16596,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386ULE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386ULE, cmp)
return true
}
// match: (UGE (FlagEQ) yes no)
@@ -17738,8 +16637,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386ULT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386ULT, cmp)
return true
}
// match: (UGT (FlagEQ) yes no)
@@ -17781,8 +16679,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386UGE)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGE, cmp)
return true
}
// match: (ULE (FlagEQ) yes no)
@@ -17823,8 +16720,7 @@ func rewriteBlock386(b *Block) bool {
for b.Controls[0].Op == Op386InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(Block386UGT)
- b.AddControl(cmp)
+ b.resetWithControl(Block386UGT, cmp)
return true
}
// match: (ULT (FlagEQ) yes no)
diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go
index cce1b2d05a..f82eae99ab 100644
--- a/src/cmd/compile/internal/ssa/rewrite386splitload.go
+++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go
@@ -37,8 +37,7 @@ func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -61,10 +60,8 @@ func rewriteValue386splitload_Op386CMPBload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
@@ -85,8 +82,7 @@ func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -109,10 +105,8 @@ func rewriteValue386splitload_Op386CMPLload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
@@ -133,8 +127,7 @@ func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -157,10 +150,8 @@ func rewriteValue386splitload_Op386CMPWload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 665b20c42d..c37bae2c22 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -780,6 +780,9 @@ func rewriteValueAMD64(v *Value) bool {
case OpCvt64to64F:
v.Op = OpAMD64CVTSQ2SD
return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
case OpDiv128u:
v.Op = OpAMD64DIVQU2
return true
@@ -1262,8 +1265,7 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
}
v.reset(OpAMD64ADCQconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(carry)
+ v.AddArg2(x, carry)
return true
}
break
@@ -1277,8 +1279,7 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
break
}
v.reset(OpAMD64ADDQcarry)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -1404,8 +1405,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64LEAL8)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1420,8 +1420,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64LEAL4)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1436,8 +1435,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64LEAL2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1455,8 +1453,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
continue
}
v.reset(OpAMD64LEAL2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1478,8 +1475,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
}
y := v_1_1
v.reset(OpAMD64LEAL2)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
}
@@ -1497,8 +1493,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
y := v_1
v.reset(OpAMD64LEAL1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1521,8 +1516,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
v.reset(OpAMD64LEAL1)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1537,8 +1531,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64SUBL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1563,9 +1556,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
v.reset(OpAMD64ADDLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -1585,8 +1576,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
x := v_0.Args[0]
v.reset(OpAMD64LEAL1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (SHLLconst [1] x))
@@ -1599,8 +1589,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
x := v_0.Args[0]
v.reset(OpAMD64LEAL1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (ADDLconst [c] (LEAL [d] {s} x))
@@ -1641,8 +1630,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
v.reset(OpAMD64LEAL1)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
@@ -1663,8 +1651,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
v.reset(OpAMD64LEAL2)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
@@ -1685,8 +1672,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
v.reset(OpAMD64LEAL4)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
@@ -1707,8 +1693,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
v.reset(OpAMD64LEAL8)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] x)
@@ -1720,9 +1705,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ADDLconst [c] (MOVLconst [d]))
@@ -1787,8 +1770,7 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
v.reset(OpAMD64ADDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -1810,8 +1792,7 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
v.reset(OpAMD64ADDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -1841,9 +1822,7 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
v.reset(OpAMD64ADDLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -1866,9 +1845,7 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
v.reset(OpAMD64ADDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
@@ -1881,16 +1858,14 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ADDL)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -1918,9 +1893,7 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
v.reset(OpAMD64ADDLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -1943,9 +1916,7 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
v.reset(OpAMD64ADDLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -2027,8 +1998,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64LEAQ8)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2043,8 +2013,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64LEAQ4)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2059,8 +2028,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64LEAQ2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2078,8 +2046,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
continue
}
v.reset(OpAMD64LEAQ2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2101,8 +2068,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
}
y := v_1_1
v.reset(OpAMD64LEAQ2)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
}
@@ -2120,8 +2086,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
y := v_1
v.reset(OpAMD64LEAQ1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2144,8 +2109,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
v.reset(OpAMD64LEAQ1)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2160,8 +2124,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64SUBQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2186,9 +2149,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
v.reset(OpAMD64ADDQload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -2233,8 +2194,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
x := v_0.Args[0]
v.reset(OpAMD64LEAQ1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDQconst [c] (SHLQconst [1] x))
@@ -2247,8 +2207,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
x := v_0.Args[0]
v.reset(OpAMD64LEAQ1)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (ADDQconst [c] (LEAQ [d] {s} x))
@@ -2289,8 +2248,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
v.reset(OpAMD64LEAQ1)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
@@ -2311,8 +2269,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
v.reset(OpAMD64LEAQ2)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
@@ -2333,8 +2290,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
@@ -2355,8 +2311,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ADDQconst [0] x)
@@ -2366,9 +2321,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ADDQconst [c] (MOVQconst [d]))
@@ -2437,8 +2390,7 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
v.reset(OpAMD64ADDQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -2460,8 +2412,7 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
v.reset(OpAMD64ADDQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -2491,9 +2442,7 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
v.reset(OpAMD64ADDQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -2516,9 +2465,7 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
v.reset(OpAMD64ADDQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
@@ -2531,16 +2478,14 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ADDQ)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -2568,9 +2513,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
v.reset(OpAMD64ADDQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -2593,9 +2536,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
v.reset(OpAMD64ADDQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -2623,9 +2564,7 @@ func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
v.reset(OpAMD64ADDSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -2657,9 +2596,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
v.reset(OpAMD64ADDSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -2682,9 +2619,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
v.reset(OpAMD64ADDSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
@@ -2697,16 +2632,14 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ADDSD)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -2734,9 +2667,7 @@ func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
v.reset(OpAMD64ADDSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -2768,9 +2699,7 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
v.reset(OpAMD64ADDSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -2793,9 +2722,7 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
v.reset(OpAMD64ADDSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
@@ -2808,16 +2735,14 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ADDSS)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -2843,8 +2768,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
}
x := v_1
v.reset(OpAMD64BTRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2892,9 +2816,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
@@ -2917,9 +2839,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
v.reset(OpAMD64ANDLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -3013,9 +2933,7 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ANDLconst [c] (MOVLconst [d]))
@@ -3053,8 +2971,7 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
v.reset(OpAMD64ANDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -3076,8 +2993,7 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
v.reset(OpAMD64ANDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -3107,9 +3023,7 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
v.reset(OpAMD64ANDLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -3132,9 +3046,7 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
v.reset(OpAMD64ANDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
@@ -3147,16 +3059,14 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ANDL)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -3184,9 +3094,7 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
v.reset(OpAMD64ANDLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -3209,9 +3117,7 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
v.reset(OpAMD64ANDLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -3237,8 +3143,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
x := v_1
v.reset(OpAMD64BTRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3290,9 +3195,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
@@ -3315,9 +3218,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
v.reset(OpAMD64ANDQload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -3418,9 +3319,7 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ANDQconst [c] (MOVQconst [d]))
@@ -3458,8 +3357,7 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
v.reset(OpAMD64ANDQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -3481,8 +3379,7 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
v.reset(OpAMD64ANDQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -3512,9 +3409,7 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
v.reset(OpAMD64ANDQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -3537,9 +3432,7 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
v.reset(OpAMD64ANDQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
@@ -3552,16 +3445,14 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ANDQ)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -3589,9 +3480,7 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
v.reset(OpAMD64ANDQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -3614,9 +3503,7 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
v.reset(OpAMD64ANDQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -3735,8 +3622,7 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
v.reset(OpAMD64BTCLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -3758,8 +3644,7 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
v.reset(OpAMD64BTCLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -3787,9 +3672,7 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
v.reset(OpAMD64BTCLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -3812,9 +3695,7 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
v.reset(OpAMD64BTCLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -3884,8 +3765,7 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
v.reset(OpAMD64BTCQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -3907,8 +3787,7 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
v.reset(OpAMD64BTCQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -3936,9 +3815,7 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
v.reset(OpAMD64BTCQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -3961,9 +3838,7 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
v.reset(OpAMD64BTCQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -4019,8 +3894,7 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
y := s.Args[1]
x := s.Args[0]
v.reset(OpAMD64BTQ)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
// match: (BTLconst [c] (SHRLconst [d] x))
@@ -4072,8 +3946,7 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
y := s.Args[1]
x := s.Args[0]
v.reset(OpAMD64BTL)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
return false
@@ -4129,8 +4002,7 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
y := s.Args[1]
x := s.Args[0]
v.reset(OpAMD64BTQ)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
return false
@@ -4226,8 +4098,7 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
v.reset(OpAMD64BTRLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -4249,8 +4120,7 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
v.reset(OpAMD64BTRLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -4278,9 +4148,7 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
v.reset(OpAMD64BTRLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -4303,9 +4171,7 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
v.reset(OpAMD64BTRLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -4401,8 +4267,7 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
v.reset(OpAMD64BTRQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -4424,8 +4289,7 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
v.reset(OpAMD64BTRQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -4453,9 +4317,7 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
v.reset(OpAMD64BTRQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -4478,9 +4340,7 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
v.reset(OpAMD64BTRQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -4576,8 +4436,7 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
v.reset(OpAMD64BTSLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -4599,8 +4458,7 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
v.reset(OpAMD64BTSLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -4628,9 +4486,7 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
v.reset(OpAMD64BTSLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -4653,9 +4509,7 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
v.reset(OpAMD64BTSLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -4751,8 +4605,7 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
v.reset(OpAMD64BTSQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -4774,8 +4627,7 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
v.reset(OpAMD64BTSQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -4803,9 +4655,7 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
v.reset(OpAMD64BTSQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -4828,9 +4678,7 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
v.reset(OpAMD64BTSQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -4849,9 +4697,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLLS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLCC _ x (FlagEQ))
@@ -4861,9 +4707,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLCC _ x (FlagGT_UGT))
@@ -4873,9 +4717,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLCC y _ (FlagGT_ULT))
@@ -4885,9 +4727,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLCC y _ (FlagLT_ULT))
@@ -4897,9 +4737,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLCC _ x (FlagLT_UGT))
@@ -4909,9 +4747,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -4930,9 +4766,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLHI)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLCS y _ (FlagEQ))
@@ -4942,9 +4776,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLCS y _ (FlagGT_UGT))
@@ -4954,9 +4786,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLCS _ x (FlagGT_ULT))
@@ -4966,9 +4796,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLCS _ x (FlagLT_ULT))
@@ -4978,9 +4806,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLCS y _ (FlagLT_UGT))
@@ -4990,9 +4816,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -5011,9 +4835,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLEQ)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLEQ _ x (FlagEQ))
@@ -5023,9 +4845,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLEQ y _ (FlagGT_UGT))
@@ -5035,9 +4855,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLEQ y _ (FlagGT_ULT))
@@ -5047,9 +4865,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLEQ y _ (FlagLT_ULT))
@@ -5059,9 +4875,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLEQ y _ (FlagLT_UGT))
@@ -5071,9 +4885,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -5092,9 +4904,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLLE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLGE _ x (FlagEQ))
@@ -5104,9 +4914,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLGE _ x (FlagGT_UGT))
@@ -5116,9 +4924,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLGE _ x (FlagGT_ULT))
@@ -5128,9 +4934,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLGE y _ (FlagLT_ULT))
@@ -5140,9 +4944,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLGE y _ (FlagLT_UGT))
@@ -5152,9 +4954,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -5173,9 +4973,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLLT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLGT y _ (FlagEQ))
@@ -5185,9 +4983,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLGT _ x (FlagGT_UGT))
@@ -5197,9 +4993,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLGT _ x (FlagGT_ULT))
@@ -5209,9 +5003,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLGT y _ (FlagLT_ULT))
@@ -5221,9 +5013,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLGT y _ (FlagLT_UGT))
@@ -5233,9 +5023,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -5254,9 +5042,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLCS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLHI y _ (FlagEQ))
@@ -5266,9 +5052,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLHI _ x (FlagGT_UGT))
@@ -5278,9 +5062,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLHI y _ (FlagGT_ULT))
@@ -5290,9 +5072,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLHI y _ (FlagLT_ULT))
@@ -5302,9 +5082,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLHI _ x (FlagLT_UGT))
@@ -5314,9 +5092,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5335,9 +5111,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLGE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLLE _ x (FlagEQ))
@@ -5347,9 +5121,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLLE y _ (FlagGT_UGT))
@@ -5359,9 +5131,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLLE y _ (FlagGT_ULT))
@@ -5371,9 +5141,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLLE _ x (FlagLT_ULT))
@@ -5383,9 +5151,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLLE _ x (FlagLT_UGT))
@@ -5395,9 +5161,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5416,9 +5180,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLCC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLLS _ x (FlagEQ))
@@ -5428,9 +5190,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLLS y _ (FlagGT_UGT))
@@ -5440,9 +5200,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLLS _ x (FlagGT_ULT))
@@ -5452,9 +5210,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLLS _ x (FlagLT_ULT))
@@ -5464,9 +5220,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLLS y _ (FlagLT_UGT))
@@ -5476,9 +5230,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -5497,9 +5249,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLGT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLLT y _ (FlagEQ))
@@ -5509,9 +5259,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLLT y _ (FlagGT_UGT))
@@ -5521,9 +5269,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLLT y _ (FlagGT_ULT))
@@ -5533,9 +5279,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLLT _ x (FlagLT_ULT))
@@ -5545,9 +5289,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLLT _ x (FlagLT_UGT))
@@ -5557,9 +5299,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5578,9 +5318,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVLNE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVLNE y _ (FlagEQ))
@@ -5590,9 +5328,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVLNE _ x (FlagGT_UGT))
@@ -5602,9 +5338,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLNE _ x (FlagGT_ULT))
@@ -5614,9 +5348,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLNE _ x (FlagLT_ULT))
@@ -5626,9 +5358,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVLNE _ x (FlagLT_UGT))
@@ -5638,9 +5368,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5659,9 +5387,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQLS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQCC _ x (FlagEQ))
@@ -5671,9 +5397,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQCC _ x (FlagGT_UGT))
@@ -5683,9 +5407,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQCC y _ (FlagGT_ULT))
@@ -5695,9 +5417,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQCC y _ (FlagLT_ULT))
@@ -5707,9 +5427,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQCC _ x (FlagLT_UGT))
@@ -5719,9 +5437,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5740,9 +5456,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQHI)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQCS y _ (FlagEQ))
@@ -5752,9 +5466,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQCS y _ (FlagGT_UGT))
@@ -5764,9 +5476,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQCS _ x (FlagGT_ULT))
@@ -5776,9 +5486,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQCS _ x (FlagLT_ULT))
@@ -5788,9 +5496,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQCS y _ (FlagLT_UGT))
@@ -5800,9 +5506,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -5821,9 +5525,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQEQ)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQEQ _ x (FlagEQ))
@@ -5833,9 +5535,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQEQ y _ (FlagGT_UGT))
@@ -5845,9 +5545,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQEQ y _ (FlagGT_ULT))
@@ -5857,9 +5555,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQEQ y _ (FlagLT_ULT))
@@ -5869,9 +5565,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQEQ y _ (FlagLT_UGT))
@@ -5881,9 +5575,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
@@ -5906,9 +5598,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
if !(c != 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5927,9 +5617,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQLE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQGE _ x (FlagEQ))
@@ -5939,9 +5627,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQGE _ x (FlagGT_UGT))
@@ -5951,9 +5637,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQGE _ x (FlagGT_ULT))
@@ -5963,9 +5647,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQGE y _ (FlagLT_ULT))
@@ -5975,9 +5657,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQGE y _ (FlagLT_UGT))
@@ -5987,9 +5667,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6008,9 +5686,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQLT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQGT y _ (FlagEQ))
@@ -6020,9 +5696,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQGT _ x (FlagGT_UGT))
@@ -6032,9 +5706,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQGT _ x (FlagGT_ULT))
@@ -6044,9 +5716,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQGT y _ (FlagLT_ULT))
@@ -6056,9 +5726,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQGT y _ (FlagLT_UGT))
@@ -6068,9 +5736,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6089,9 +5755,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQCS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQHI y _ (FlagEQ))
@@ -6101,9 +5765,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQHI _ x (FlagGT_UGT))
@@ -6113,9 +5775,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQHI y _ (FlagGT_ULT))
@@ -6125,9 +5785,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQHI y _ (FlagLT_ULT))
@@ -6137,9 +5795,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQHI _ x (FlagLT_UGT))
@@ -6149,9 +5805,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6170,9 +5824,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQGE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQLE _ x (FlagEQ))
@@ -6182,9 +5834,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQLE y _ (FlagGT_UGT))
@@ -6194,9 +5844,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQLE y _ (FlagGT_ULT))
@@ -6206,9 +5854,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQLE _ x (FlagLT_ULT))
@@ -6218,9 +5864,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQLE _ x (FlagLT_UGT))
@@ -6230,9 +5874,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6251,9 +5893,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQCC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQLS _ x (FlagEQ))
@@ -6263,9 +5903,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQLS y _ (FlagGT_UGT))
@@ -6275,9 +5913,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQLS _ x (FlagGT_ULT))
@@ -6287,9 +5923,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQLS _ x (FlagLT_ULT))
@@ -6299,9 +5933,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQLS y _ (FlagLT_UGT))
@@ -6311,9 +5943,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6332,9 +5962,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQGT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQLT y _ (FlagEQ))
@@ -6344,9 +5972,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQLT y _ (FlagGT_UGT))
@@ -6356,9 +5982,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQLT y _ (FlagGT_ULT))
@@ -6368,9 +5992,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQLT _ x (FlagLT_ULT))
@@ -6380,9 +6002,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQLT _ x (FlagLT_UGT))
@@ -6392,9 +6012,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6413,9 +6031,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVQNE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVQNE y _ (FlagEQ))
@@ -6425,9 +6041,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVQNE _ x (FlagGT_UGT))
@@ -6437,9 +6051,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQNE _ x (FlagGT_ULT))
@@ -6449,9 +6061,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQNE _ x (FlagLT_ULT))
@@ -6461,9 +6071,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVQNE _ x (FlagLT_UGT))
@@ -6473,9 +6081,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6494,9 +6100,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWLS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWCC _ x (FlagEQ))
@@ -6506,9 +6110,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWCC _ x (FlagGT_UGT))
@@ -6518,9 +6120,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWCC y _ (FlagGT_ULT))
@@ -6530,9 +6130,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWCC y _ (FlagLT_ULT))
@@ -6542,9 +6140,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWCC _ x (FlagLT_UGT))
@@ -6554,9 +6150,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6575,9 +6169,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWHI)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWCS y _ (FlagEQ))
@@ -6587,9 +6179,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWCS y _ (FlagGT_UGT))
@@ -6599,9 +6189,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWCS _ x (FlagGT_ULT))
@@ -6611,9 +6199,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWCS _ x (FlagLT_ULT))
@@ -6623,9 +6209,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWCS y _ (FlagLT_UGT))
@@ -6635,9 +6219,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6656,9 +6238,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWEQ)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWEQ _ x (FlagEQ))
@@ -6668,9 +6248,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWEQ y _ (FlagGT_UGT))
@@ -6680,9 +6258,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWEQ y _ (FlagGT_ULT))
@@ -6692,9 +6268,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWEQ y _ (FlagLT_ULT))
@@ -6704,9 +6278,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWEQ y _ (FlagLT_UGT))
@@ -6716,9 +6288,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6737,9 +6307,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWLE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWGE _ x (FlagEQ))
@@ -6749,9 +6317,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWGE _ x (FlagGT_UGT))
@@ -6761,9 +6327,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWGE _ x (FlagGT_ULT))
@@ -6773,9 +6337,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWGE y _ (FlagLT_ULT))
@@ -6785,9 +6347,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWGE y _ (FlagLT_UGT))
@@ -6797,9 +6357,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6818,9 +6376,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWLT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWGT y _ (FlagEQ))
@@ -6830,9 +6386,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWGT _ x (FlagGT_UGT))
@@ -6842,9 +6396,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWGT _ x (FlagGT_ULT))
@@ -6854,9 +6406,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWGT y _ (FlagLT_ULT))
@@ -6866,9 +6416,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWGT y _ (FlagLT_UGT))
@@ -6878,9 +6426,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -6899,9 +6445,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWCS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWHI y _ (FlagEQ))
@@ -6911,9 +6455,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWHI _ x (FlagGT_UGT))
@@ -6923,9 +6465,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWHI y _ (FlagGT_ULT))
@@ -6935,9 +6475,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWHI y _ (FlagLT_ULT))
@@ -6947,9 +6485,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWHI _ x (FlagLT_UGT))
@@ -6959,9 +6495,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6980,9 +6514,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWGE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWLE _ x (FlagEQ))
@@ -6992,9 +6524,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLE y _ (FlagGT_UGT))
@@ -7004,9 +6534,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWLE y _ (FlagGT_ULT))
@@ -7016,9 +6544,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWLE _ x (FlagLT_ULT))
@@ -7028,9 +6554,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLE _ x (FlagLT_UGT))
@@ -7040,9 +6564,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -7061,9 +6583,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWCC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWLS _ x (FlagEQ))
@@ -7073,9 +6593,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLS y _ (FlagGT_UGT))
@@ -7085,9 +6603,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWLS _ x (FlagGT_ULT))
@@ -7097,9 +6613,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLS _ x (FlagLT_ULT))
@@ -7109,9 +6623,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLS y _ (FlagLT_UGT))
@@ -7121,9 +6633,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -7142,9 +6652,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWGT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWLT y _ (FlagEQ))
@@ -7154,9 +6662,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWLT y _ (FlagGT_UGT))
@@ -7166,9 +6672,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWLT y _ (FlagGT_ULT))
@@ -7178,9 +6682,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWLT _ x (FlagLT_ULT))
@@ -7190,9 +6692,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLT _ x (FlagLT_UGT))
@@ -7202,9 +6702,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -7223,9 +6721,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
}
cond := v_2.Args[0]
v.reset(OpAMD64CMOVWNE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg3(x, y, cond)
return true
}
// match: (CMOVWNE y _ (FlagEQ))
@@ -7235,9 +6731,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
if v_2.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
// match: (CMOVWNE _ x (FlagGT_UGT))
@@ -7247,9 +6741,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWNE _ x (FlagGT_ULT))
@@ -7259,9 +6751,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
if v_2.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWNE _ x (FlagLT_ULT))
@@ -7271,9 +6761,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWNE _ x (FlagLT_UGT))
@@ -7283,9 +6771,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
if v_2.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -7333,8 +6819,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -7357,9 +6842,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
v.reset(OpAMD64CMPBload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
@@ -7382,9 +6865,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -7492,8 +6973,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpAMD64TESTB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPBconst (ANDLconst [c] x) [0])
@@ -7517,8 +6997,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64TESTB)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
@@ -7539,12 +7018,10 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -7570,8 +7047,7 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
v.reset(OpAMD64CMPBconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -7593,8 +7069,7 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
v.reset(OpAMD64CMPBconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -7622,9 +7097,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
v.reset(OpAMD64CMPBload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -7647,9 +7120,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
v.reset(OpAMD64CMPBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
@@ -7670,8 +7141,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
v.reset(OpAMD64CMPBconstload)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -7719,8 +7189,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -7743,9 +7212,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
v.reset(OpAMD64CMPLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
@@ -7768,9 +7235,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -7893,8 +7358,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpAMD64TESTL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPLconst (ANDLconst [c] x) [0])
@@ -7918,8 +7382,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64TESTL)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
@@ -7940,12 +7403,10 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -7971,8 +7432,7 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
v.reset(OpAMD64CMPLconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -7994,8 +7454,7 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
v.reset(OpAMD64CMPLconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -8023,9 +7482,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
v.reset(OpAMD64CMPLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -8048,9 +7505,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
v.reset(OpAMD64CMPLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
@@ -8071,8 +7526,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
v.reset(OpAMD64CMPLconstload)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -8128,11 +7582,100 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := v_0.AuxInt
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := v_1.AuxInt
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)<uint64(y)
+ // result: (FlagLT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := v_0.AuxInt
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := v_1.AuxInt
+ if !(x < y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)>uint64(y)
+ // result: (FlagLT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := v_0.AuxInt
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := v_1.AuxInt
+ if !(x < y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)<uint64(y)
+ // result: (FlagGT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := v_0.AuxInt
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := v_1.AuxInt
+ if !(x > y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)>uint64(y)
+ // result: (FlagGT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := v_0.AuxInt
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := v_1.AuxInt
+ if !(x > y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPQload {sym} [off] ptr x mem)
@@ -8152,9 +7695,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
v.reset(OpAMD64CMPQload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
@@ -8177,9 +7718,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -8384,8 +7923,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpAMD64TESTQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPQconst (ANDQconst [c] x) [0])
@@ -8409,8 +7947,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64TESTQ)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
@@ -8431,12 +7968,10 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -8462,8 +7997,7 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
v.reset(OpAMD64CMPQconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -8485,8 +8019,7 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
v.reset(OpAMD64CMPQconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -8514,9 +8047,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
v.reset(OpAMD64CMPQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -8539,9 +8070,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
v.reset(OpAMD64CMPQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
@@ -8562,8 +8091,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
v.reset(OpAMD64CMPQconstload)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -8611,8 +8139,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -8635,9 +8162,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
v.reset(OpAMD64CMPWload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
@@ -8660,9 +8185,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
@@ -8770,8 +8293,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpAMD64TESTW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMPWconst (ANDLconst [c] x) [0])
@@ -8795,8 +8317,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64TESTW)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
@@ -8817,12 +8338,10 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(c, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
return false
@@ -8848,8 +8367,7 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
v.reset(OpAMD64CMPWconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -8871,8 +8389,7 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
v.reset(OpAMD64CMPWconstload)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -8900,9 +8417,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
v.reset(OpAMD64CMPWload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -8925,9 +8440,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
v.reset(OpAMD64CMPWload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
@@ -8948,8 +8461,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
v.reset(OpAMD64CMPWconstload)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -8979,10 +8491,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
v.reset(OpAMD64CMPXCHGLlock)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(old)
- v.AddArg(new_)
- v.AddArg(mem)
+ v.AddArg4(ptr, old, new_, mem)
return true
}
return false
@@ -9012,10 +8521,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
v.reset(OpAMD64CMPXCHGQlock)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(old)
- v.AddArg(new_)
- v.AddArg(mem)
+ v.AddArg4(ptr, old, new_, mem)
return true
}
return false
@@ -9042,9 +8548,7 @@ func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
v.reset(OpAMD64DIVSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -9072,9 +8576,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
v.reset(OpAMD64DIVSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -9097,9 +8599,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
v.reset(OpAMD64DIVSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -9126,9 +8626,7 @@ func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
v.reset(OpAMD64DIVSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -9156,9 +8654,7 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
v.reset(OpAMD64DIVSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -9181,9 +8677,7 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
v.reset(OpAMD64DIVSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -9201,8 +8695,7 @@ func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
break
}
v.reset(OpAMD64HMULL)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
return false
@@ -9220,8 +8713,7 @@ func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
break
}
v.reset(OpAMD64HMULLU)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
return false
@@ -9239,8 +8731,7 @@ func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
break
}
v.reset(OpAMD64HMULQ)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
return false
@@ -9258,8 +8749,7 @@ func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
break
}
v.reset(OpAMD64HMULQU)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
return false
@@ -9307,8 +8797,7 @@ func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
v.reset(OpAMD64LEAL1)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9337,8 +8826,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
v.reset(OpAMD64LEAL1)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9357,8 +8845,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
v.reset(OpAMD64LEAL2)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9377,8 +8864,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
v.reset(OpAMD64LEAL4)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9397,8 +8883,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
v.reset(OpAMD64LEAL8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9426,8 +8911,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
v.reset(OpAMD64LEAL2)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
@@ -9448,8 +8932,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
v.reset(OpAMD64LEAL2)
v.AuxInt = c + 2*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
@@ -9465,8 +8948,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
v.reset(OpAMD64LEAL4)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
@@ -9482,8 +8964,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
v.reset(OpAMD64LEAL8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9509,8 +8990,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
v.reset(OpAMD64LEAL4)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
@@ -9531,8 +9011,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
v.reset(OpAMD64LEAL4)
v.AuxInt = c + 4*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
@@ -9548,8 +9027,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
v.reset(OpAMD64LEAL8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9575,8 +9053,7 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
v.reset(OpAMD64LEAL8)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
@@ -9597,8 +9074,7 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
v.reset(OpAMD64LEAL8)
v.AuxInt = c + 8*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9646,8 +9122,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
v.reset(OpAMD64LEAQ1)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9692,8 +9167,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
v.reset(OpAMD64LEAQ1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
@@ -9715,8 +9189,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
v.reset(OpAMD64LEAQ2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
@@ -9738,8 +9211,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
@@ -9761,8 +9233,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9789,8 +9260,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
v.reset(OpAMD64LEAQ1)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9809,8 +9279,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
v.reset(OpAMD64LEAQ2)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9829,8 +9298,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9849,8 +9317,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9875,8 +9342,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
v.reset(OpAMD64LEAQ1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9894,8 +9360,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
break
}
v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9921,8 +9386,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
v.reset(OpAMD64LEAQ2)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
@@ -9943,8 +9407,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
v.reset(OpAMD64LEAQ2)
v.AuxInt = c + 2*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
@@ -9960,8 +9423,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
@@ -9977,8 +9439,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
@@ -10000,8 +9461,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
v.reset(OpAMD64LEAQ2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10027,8 +9487,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
@@ -10049,8 +9508,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = c + 4*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
@@ -10066,8 +9524,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = c
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
@@ -10089,8 +9546,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
v.reset(OpAMD64LEAQ4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10116,8 +9572,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = c + d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
@@ -10138,8 +9593,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = c + 8*d
v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
@@ -10161,8 +9615,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
v.reset(OpAMD64LEAQ8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10187,12 +9640,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
@@ -10212,12 +9663,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
@@ -10237,12 +9686,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
@@ -10262,12 +9709,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQSX (ANDLconst [c] x))
@@ -10315,9 +9760,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -10344,8 +9788,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
v.reset(OpAMD64MOVBQSXload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -10370,12 +9813,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
@@ -10395,12 +9836,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
@@ -10420,12 +9859,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
@@ -10445,12 +9882,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVBQZX x)
@@ -10461,9 +9896,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
if !(zeroUpper56Bits(x, 3)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
@@ -10484,13 +9917,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBQZX (ANDLconst [c] x))
@@ -10540,8 +9970,7 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
v.reset(OpAMD64MOVBatomicload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
@@ -10563,8 +9992,7 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
v.reset(OpAMD64MOVBatomicload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -10584,9 +10012,8 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -10612,8 +10039,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -10635,8 +10061,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
@@ -10659,9 +10084,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
@@ -10686,9 +10109,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -10712,8 +10133,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
@@ -10734,8 +10154,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off] {sym} (SB) _)
@@ -10777,9 +10196,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool {
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -10804,9 +10221,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool {
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -10830,8 +10245,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool {
v.reset(OpAMD64MOVBload)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
break
@@ -10863,9 +10277,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETLstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
@@ -10887,9 +10299,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETLEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
@@ -10911,9 +10321,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETGstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
@@ -10935,9 +10343,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETGEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
@@ -10959,9 +10365,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
@@ -10983,9 +10387,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
@@ -11007,9 +10409,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
@@ -11031,9 +10431,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETBEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
@@ -11055,9 +10453,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETAstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
@@ -11079,9 +10475,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
@@ -11098,9 +10492,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
@@ -11117,9 +10509,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
@@ -11141,9 +10531,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
@@ -11164,8 +10552,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
@@ -11186,8 +10573,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -11210,9 +10596,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
@@ -11236,10 +10620,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
@@ -11265,10 +10646,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -11296,12 +10674,10 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
v0.AuxInt = 8
v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
@@ -11351,11 +10727,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i - 3
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
@@ -11453,11 +10827,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = i - 7
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -11482,9 +10854,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -11509,9 +10879,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -11536,9 +10904,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
@@ -11564,9 +10930,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
@@ -11592,9 +10956,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
@@ -11620,9 +10982,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
@@ -11652,9 +11012,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
@@ -11684,9 +11042,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
@@ -11723,14 +11079,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = j - 1
v0.Aux = s2
- v0.AddArg(p2)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -11753,9 +11106,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
@@ -11777,9 +11128,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -11805,8 +11154,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
@@ -11828,8 +11176,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
@@ -11852,9 +11199,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
@@ -11871,9 +11216,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
@@ -11898,8 +11241,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
@@ -11924,8 +11266,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -11947,8 +11288,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
@@ -11969,8 +11309,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -11999,9 +11338,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -12026,9 +11363,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -12060,9 +11395,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(i)
- v.AddArg(mem)
+ v.AddArg3(p, i, mem)
return true
}
}
@@ -12097,10 +11430,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -12126,10 +11456,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -12162,13 +11489,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
v0.AuxInt = 8
v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, v0, mem)
return true
}
}
@@ -12232,12 +11556,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 3
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, v0, mem)
return true
}
}
@@ -12363,12 +11684,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = i - 7
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, v0, mem)
return true
}
}
@@ -12407,10 +11725,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -12443,10 +11758,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -12479,10 +11791,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -12520,10 +11829,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -12561,10 +11867,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = i - 1
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -12590,9 +11893,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
break
@@ -12616,9 +11917,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -12643,12 +11942,10 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
@@ -12668,12 +11965,10 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVLQSX (ANDLconst [c] x))
@@ -12743,9 +12038,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -12772,8 +12066,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
v.reset(OpAMD64MOVLQSXload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -12798,12 +12091,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
@@ -12823,12 +12114,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVLQZX x)
@@ -12839,9 +12128,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
if !(zeroUpper32Bits(x, 3)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
@@ -12862,13 +12149,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
@@ -12889,13 +12173,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLQZX (ANDLconst [c] x))
@@ -12967,8 +12248,7 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
v.reset(OpAMD64MOVLatomicload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
@@ -12990,8 +12270,7 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
v.reset(OpAMD64MOVLatomicload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -13015,8 +12294,7 @@ func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
return true
@@ -13042,8 +12320,7 @@ func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
return true
@@ -13067,9 +12344,8 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -13095,8 +12371,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -13118,8 +12393,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
@@ -13142,9 +12416,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
@@ -13167,9 +12439,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
@@ -13192,9 +12462,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLloadidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
@@ -13219,9 +12487,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -13245,8 +12511,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
@@ -13267,8 +12532,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
@@ -13280,11 +12544,10 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym {
break
}
- _ = v_1.Args[2]
+ val := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
- val := v_1.Args[1]
v.reset(OpAMD64MOVLf2i)
v.AddArg(val)
return true
@@ -13323,9 +12586,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool {
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -13345,9 +12606,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool {
v.reset(OpAMD64MOVLloadidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -13372,9 +12631,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool {
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -13399,9 +12656,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool {
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -13425,8 +12680,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
break
@@ -13456,9 +12710,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool {
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -13480,9 +12732,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool {
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c + 4*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem)
@@ -13503,8 +12753,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = i + 4*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -13532,9 +12781,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool {
v.reset(OpAMD64MOVLloadidx8)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -13556,9 +12803,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool {
v.reset(OpAMD64MOVLloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem)
@@ -13579,8 +12824,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool {
v.reset(OpAMD64MOVLload)
v.AuxInt = i + 8*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -13605,9 +12849,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
@@ -13624,9 +12866,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
@@ -13648,9 +12888,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
@@ -13671,8 +12909,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
@@ -13693,8 +12930,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -13717,9 +12953,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
@@ -13743,10 +12977,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
@@ -13770,10 +13001,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
@@ -13797,10 +13025,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
@@ -13826,10 +13051,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -13856,9 +13078,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
@@ -13888,9 +13108,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
@@ -13927,14 +13145,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = j - 4
v0.Aux = s2
- v0.AddArg(p2)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -13957,9 +13172,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
@@ -13981,9 +13194,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
@@ -14005,9 +13216,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ADDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
@@ -14029,9 +13238,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ANDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
@@ -14053,9 +13260,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
@@ -14077,9 +13282,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64XORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
@@ -14112,9 +13315,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ADDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -14142,9 +13343,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64SUBLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
@@ -14177,9 +13376,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ANDLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -14214,9 +13411,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -14251,9 +13446,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64XORLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -14281,9 +13474,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64BTCLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
@@ -14309,9 +13500,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64BTRLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
@@ -14337,9 +13526,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64BTSLmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14366,8 +13553,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ADDLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14394,8 +13580,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ANDLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14422,8 +13607,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64ORLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14450,8 +13634,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64XORLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14478,8 +13661,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64BTCLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14506,8 +13688,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64BTRLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -14534,8 +13715,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64BTSLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
@@ -14552,9 +13732,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.reset(OpAMD64MOVSSstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -14582,8 +13760,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
@@ -14605,8 +13782,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
@@ -14629,9 +13805,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
@@ -14654,9 +13828,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
@@ -14673,9 +13845,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
@@ -14700,11 +13870,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
@@ -14729,11 +13897,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -14755,8 +13921,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
@@ -14777,8 +13942,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -14804,9 +13968,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -14831,9 +13993,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -14858,9 +14018,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -14892,12 +14050,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
- v.AddArg(p)
- v.AddArg(i)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg4(p, i, v0, mem)
return true
}
}
@@ -14930,9 +14085,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
@@ -14954,9 +14107,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(4 * c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
@@ -14982,15 +14133,12 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
v0.AuxInt = 2
v0.AddArg(i)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
- v.AddArg(v1)
- v.AddArg(mem)
+ v.AddArg4(p, v0, v1, mem)
return true
}
return false
@@ -15016,10 +14164,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -15040,10 +14185,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -15069,10 +14211,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -15098,10 +14237,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -15133,10 +14269,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -15174,10 +14307,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -15203,9 +14333,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
break
@@ -15229,9 +14357,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -15262,10 +14388,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -15288,10 +14411,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c + 4*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
@@ -15317,13 +14437,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
v0.AuxInt = 2
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w, mem)
return true
}
// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
@@ -15354,13 +14471,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = i - 4
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
v0.AuxInt = 2
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w0, mem)
return true
}
// match: (MOVLstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
@@ -15382,9 +14496,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i + 4*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVLstoreidx4 [off] {s} ptr idx (MOVQconst [c]) mem)
@@ -15406,9 +14518,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -15438,10 +14548,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx8)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -15464,10 +14571,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
@@ -15489,9 +14593,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i + 8*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
return false
@@ -15517,8 +14619,7 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
v.reset(OpAMD64MOVOload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -15540,8 +14641,7 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
v.reset(OpAMD64MOVOload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -15572,9 +14672,7 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
v.reset(OpAMD64MOVOstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -15597,9 +14695,7 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
v.reset(OpAMD64MOVOstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
@@ -15614,7 +14710,6 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
}
srcOff := v_1.AuxInt
srcSym := v_1.Aux
- _ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
@@ -15626,19 +14721,15 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = dstOff + 8
v.Aux = dstSym
- v.AddArg(ptr)
v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))
- v.AddArg(v0)
v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
v1.AuxInt = dstOff
v1.Aux = dstSym
- v1.AddArg(ptr)
v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
v2.AuxInt = int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v1.AddArg3(ptr, v2, mem)
+ v.AddArg3(ptr, v0, v1)
return true
}
return false
@@ -15664,8 +14755,7 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
v.reset(OpAMD64MOVQatomicload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
@@ -15687,8 +14777,7 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
v.reset(OpAMD64MOVQatomicload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -15712,8 +14801,7 @@ func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
return true
@@ -15739,8 +14827,7 @@ func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
return true
@@ -15764,15 +14851,12 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
@@ -15793,8 +14877,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -15816,8 +14899,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
@@ -15840,9 +14922,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
@@ -15865,9 +14945,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
@@ -15892,9 +14970,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -15918,8 +14994,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
@@ -15940,8 +15015,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
v.reset(OpAMD64MOVQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
@@ -15953,11 +15027,10 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym {
break
}
- _ = v_1.Args[2]
+ val := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
- val := v_1.Args[1]
v.reset(OpAMD64MOVQf2i)
v.AddArg(val)
return true
@@ -15996,9 +15069,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool {
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -16023,9 +15094,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool {
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -16050,9 +15119,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool {
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -16076,8 +15143,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool {
v.reset(OpAMD64MOVQload)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
break
@@ -16107,9 +15173,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool {
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -16131,9 +15195,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool {
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem)
@@ -16154,8 +15216,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool {
v.reset(OpAMD64MOVQload)
v.AuxInt = i + 8*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -16183,9 +15244,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
@@ -16206,8 +15265,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -16230,9 +15288,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
@@ -16256,10 +15312,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
@@ -16283,10 +15336,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
@@ -16312,10 +15362,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -16340,9 +15387,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
@@ -16364,9 +15409,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
@@ -16388,9 +15431,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ADDQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
@@ -16412,9 +15453,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ANDQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
@@ -16436,9 +15475,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ORQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
@@ -16460,9 +15497,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64XORQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
@@ -16495,9 +15530,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ADDQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -16525,9 +15558,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64SUBQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
@@ -16560,9 +15591,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ANDQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -16597,9 +15626,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ORQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -16634,9 +15661,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64XORQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
break
@@ -16664,9 +15689,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64BTCQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
@@ -16692,9 +15715,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64BTRQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
@@ -16720,9 +15741,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64BTSQmodify)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16749,8 +15768,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ADDQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16777,8 +15795,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ANDQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16805,8 +15822,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64ORQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16833,8 +15849,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64XORQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16861,8 +15876,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64BTCQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16889,8 +15903,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64BTRQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -16917,8 +15930,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64BTSQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
@@ -16935,9 +15947,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.reset(OpAMD64MOVSDstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -16965,8 +15975,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
@@ -16988,8 +15997,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
@@ -17012,9 +16020,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
@@ -17037,9 +16043,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
@@ -17056,9 +16060,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
@@ -17083,11 +16085,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVOstore)
v.AuxInt = ValAndOff(c2).Off()
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -17109,8 +16109,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
@@ -17131,8 +16130,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -17156,9 +16154,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -17183,9 +16179,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -17210,9 +16204,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -17242,9 +16234,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
@@ -17266,9 +16256,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(8 * c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -17294,10 +16282,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -17323,10 +16308,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -17352,10 +16334,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -17380,9 +16359,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
break
@@ -17406,9 +16383,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = makeValAndOff(c, off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -17438,10 +16413,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -17464,10 +16436,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
@@ -17489,9 +16458,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = i + 8*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVQstoreidx8 [off] {s} ptr idx (MOVQconst [c]) mem)
@@ -17513,9 +16480,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = makeValAndOff(c, off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -17541,8 +16506,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
v.reset(OpAMD64MOVSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -17564,8 +16528,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
v.reset(OpAMD64MOVSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
@@ -17588,9 +16551,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
@@ -17613,9 +16574,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
@@ -17640,9 +16599,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -17656,11 +16613,10 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym {
break
}
- _ = v_1.Args[2]
+ val := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
- val := v_1.Args[1]
v.reset(OpAMD64MOVQi2f)
v.AddArg(val)
return true
@@ -17685,9 +16641,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
@@ -17709,9 +16663,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -17733,9 +16685,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem)
@@ -17756,8 +16706,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSDload)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -17785,9 +16734,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -17809,9 +16756,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool {
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem)
@@ -17832,8 +16777,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool {
v.reset(OpAMD64MOVSDload)
v.AuxInt = i + 8*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -17861,9 +16805,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
v.reset(OpAMD64MOVSDstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -17886,9 +16828,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
v.reset(OpAMD64MOVSDstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
@@ -17912,10 +16852,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
@@ -17939,10 +16876,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
@@ -17968,10 +16902,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -17990,9 +16921,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -18017,10 +16946,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
@@ -18043,10 +16969,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -18069,10 +16992,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
@@ -18094,9 +17014,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSDstore)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
return false
@@ -18126,10 +17044,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -18152,10 +17067,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
@@ -18177,9 +17089,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool {
v.reset(OpAMD64MOVSDstore)
v.AuxInt = i + 8*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
return false
@@ -18205,8 +17115,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
v.reset(OpAMD64MOVSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -18228,8 +17137,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
v.reset(OpAMD64MOVSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
@@ -18252,9 +17160,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
@@ -18277,9 +17183,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
@@ -18304,9 +17208,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -18320,11 +17222,10 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym {
break
}
- _ = v_1.Args[2]
+ val := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
- val := v_1.Args[1]
v.reset(OpAMD64MOVLi2f)
v.AddArg(val)
return true
@@ -18349,9 +17250,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
@@ -18373,9 +17272,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -18397,9 +17294,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem)
@@ -18420,8 +17315,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool {
v.reset(OpAMD64MOVSSload)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -18449,9 +17343,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -18473,9 +17365,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool {
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c + 4*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem)
@@ -18496,8 +17386,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool {
v.reset(OpAMD64MOVSSload)
v.AuxInt = i + 4*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -18525,9 +17414,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
v.reset(OpAMD64MOVSSstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -18550,9 +17437,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
v.reset(OpAMD64MOVSSstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
@@ -18576,10 +17461,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
@@ -18603,10 +17485,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
@@ -18632,10 +17511,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -18654,9 +17530,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -18681,10 +17555,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
@@ -18707,10 +17578,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -18733,10 +17601,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
@@ -18758,9 +17623,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVSSstore)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
return false
@@ -18790,10 +17653,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -18816,10 +17676,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c + 4*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
@@ -18841,9 +17698,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool {
v.reset(OpAMD64MOVSSstore)
v.AuxInt = i + 4*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
return false
@@ -18868,12 +17723,10 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
@@ -18893,12 +17746,10 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
@@ -18918,12 +17769,10 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWQSX (ANDLconst [c] x))
@@ -18982,9 +17831,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -19011,8 +17859,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
v.reset(OpAMD64MOVWQSXload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -19037,12 +17884,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
@@ -19062,12 +17907,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
@@ -19087,12 +17930,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
// match: (MOVWQZX x)
@@ -19103,9 +17944,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
if !(zeroUpper48Bits(x, 3)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
@@ -19126,13 +17965,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
@@ -19153,13 +17989,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
}
b = x.Block
v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWQZX (ANDLconst [c] x))
@@ -19216,9 +18049,8 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -19244,8 +18076,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -19267,8 +18098,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
@@ -19291,9 +18121,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
@@ -19316,9 +18144,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
@@ -19343,9 +18169,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -19369,8 +18193,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
@@ -19391,8 +18214,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
v.reset(OpAMD64MOVWload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off] {sym} (SB) _)
@@ -19429,9 +18251,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool {
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -19456,9 +18276,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool {
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -19483,9 +18301,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool {
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -19509,8 +18325,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool {
v.reset(OpAMD64MOVWload)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
break
@@ -19540,9 +18355,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool {
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
@@ -19564,9 +18377,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool {
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c + 2*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem)
@@ -19587,8 +18398,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool {
v.reset(OpAMD64MOVWload)
v.AuxInt = i + 2*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
return false
@@ -19613,9 +18423,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
@@ -19632,9 +18440,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
@@ -19656,9 +18462,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
@@ -19679,8 +18483,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
@@ -19701,8 +18504,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -19725,9 +18527,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
@@ -19751,10 +18551,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
@@ -19778,10 +18575,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
@@ -19807,10 +18601,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -19837,9 +18628,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
@@ -19864,9 +18653,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
@@ -19896,9 +18683,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
@@ -19928,9 +18713,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg3(p, w0, mem)
return true
}
// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
@@ -19967,14 +18750,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = j - 2
v0.Aux = s2
- v0.AddArg(p2)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
@@ -19997,9 +18777,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
@@ -20021,9 +18799,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -20049,8 +18825,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
@@ -20072,8 +18847,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
@@ -20096,9 +18870,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
@@ -20121,9 +18893,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
@@ -20140,9 +18910,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = x
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
@@ -20167,8 +18935,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
@@ -20193,8 +18960,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(mem)
+ v.AddArg2(p, mem)
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -20216,8 +18982,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
@@ -20238,8 +19003,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = ValAndOff(sc).add(off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -20263,9 +19027,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -20290,9 +19052,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -20317,9 +19077,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
break
@@ -20351,9 +19109,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
- v.AddArg(i)
- v.AddArg(mem)
+ v.AddArg3(p, i, mem)
return true
}
}
@@ -20385,9 +19141,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
@@ -20409,9 +19163,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(2 * c)
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
@@ -20437,12 +19189,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool {
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
v0.AuxInt = 1
v0.AddArg(i)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(p, v0, mem)
return true
}
return false
@@ -20468,10 +19218,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -20497,10 +19244,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -20526,10 +19270,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
break
@@ -20561,10 +19302,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -20597,10 +19335,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w, mem)
return true
}
}
@@ -20638,10 +19373,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -20679,10 +19411,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
- v.AddArg(idx)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, idx, w0, mem)
return true
}
}
@@ -20708,9 +19437,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i + c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
break
@@ -20734,9 +19461,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -20767,10 +19492,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c + d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
@@ -20793,10 +19515,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c + 2*d
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
@@ -20822,13 +19541,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
v0.AuxInt = 1
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w, mem)
return true
}
// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
@@ -20854,13 +19570,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
v0.AuxInt = 1
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w, mem)
return true
}
// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
@@ -20891,13 +19604,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = i - 2
v.Aux = s
- v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
v0.AuxInt = 1
v0.AddArg(idx)
- v.AddArg(v0)
- v.AddArg(w0)
- v.AddArg(mem)
+ v.AddArg4(p, v0, w0, mem)
return true
}
// match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem)
@@ -20919,9 +19629,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = i + 2*c
v.Aux = s
- v.AddArg(p)
- v.AddArg(w)
- v.AddArg(mem)
+ v.AddArg3(p, w, mem)
return true
}
// match: (MOVWstoreidx2 [off] {s} ptr idx (MOVLconst [c]) mem)
@@ -20943,9 +19651,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = s
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -20997,8 +19703,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
x := v_0
v.reset(OpAMD64NEGL)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21011,8 +19716,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
x := v_0
v.reset(OpAMD64NEGL)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21025,8 +19729,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
x := v_0
v.reset(OpAMD64NEGL)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21058,9 +19761,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MULLconst [ 3] x)
@@ -21071,8 +19772,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL2)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULLconst [ 5] x)
@@ -21083,8 +19783,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL4)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULLconst [ 7] x)
@@ -21095,11 +19794,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [ 9] x)
@@ -21110,8 +19807,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL8)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULLconst [11] x)
@@ -21122,11 +19818,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [13] x)
@@ -21137,11 +19831,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [19] x)
@@ -21152,11 +19844,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [21] x)
@@ -21167,11 +19857,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [25] x)
@@ -21182,11 +19870,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [27] x)
@@ -21198,13 +19884,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
x := v_0
v.reset(OpAMD64LEAL8)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULLconst [37] x)
@@ -21215,11 +19898,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [41] x)
@@ -21230,11 +19911,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [45] x)
@@ -21246,13 +19925,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
x := v_0
v.reset(OpAMD64LEAL8)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULLconst [73] x)
@@ -21263,11 +19939,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAL8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULLconst [81] x)
@@ -21279,13 +19953,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
x := v_0
v.reset(OpAMD64LEAL8)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULLconst [c] x)
@@ -21301,8 +19972,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v0.AuxInt = log2(c + 1)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -21318,8 +19988,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v0.AuxInt = log2(c - 1)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -21335,8 +20004,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v0.AuxInt = log2(c - 2)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -21352,8 +20020,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v0.AuxInt = log2(c - 4)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -21369,8 +20036,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v0.AuxInt = log2(c - 8)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
@@ -21385,8 +20051,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v.reset(OpAMD64SHLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21402,8 +20067,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v.reset(OpAMD64SHLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21419,8 +20083,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
v.reset(OpAMD64SHLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21493,8 +20156,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
x := v_0
v.reset(OpAMD64NEGQ)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21507,8 +20169,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
x := v_0
v.reset(OpAMD64NEGQ)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21521,8 +20182,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
x := v_0
v.reset(OpAMD64NEGQ)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21554,9 +20214,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MULQconst [ 3] x)
@@ -21567,8 +20225,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ2)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULQconst [ 5] x)
@@ -21579,8 +20236,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ4)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULQconst [ 7] x)
@@ -21591,11 +20247,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [ 9] x)
@@ -21606,8 +20260,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ8)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
// match: (MULQconst [11] x)
@@ -21618,11 +20271,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [13] x)
@@ -21633,11 +20284,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [19] x)
@@ -21648,11 +20297,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ2)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [21] x)
@@ -21663,11 +20310,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [25] x)
@@ -21678,11 +20323,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [27] x)
@@ -21694,13 +20337,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
x := v_0
v.reset(OpAMD64LEAQ8)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULQconst [37] x)
@@ -21711,11 +20351,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ4)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [41] x)
@@ -21726,11 +20364,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [45] x)
@@ -21742,13 +20378,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
x := v_0
v.reset(OpAMD64LEAQ8)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULQconst [73] x)
@@ -21759,11 +20392,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
}
x := v_0
v.reset(OpAMD64LEAQ8)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
return true
}
// match: (MULQconst [81] x)
@@ -21775,13 +20406,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
x := v_0
v.reset(OpAMD64LEAQ8)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
+ v0.AddArg2(x, x)
v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v1.AddArg(x)
- v1.AddArg(x)
- v.AddArg(v1)
+ v1.AddArg2(x, x)
+ v.AddArg2(v0, v1)
return true
}
// match: (MULQconst [c] x)
@@ -21797,8 +20425,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v0.AuxInt = log2(c + 1)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULQconst [c] x)
@@ -21814,8 +20441,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v0.AuxInt = log2(c - 1)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULQconst [c] x)
@@ -21831,8 +20457,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v0.AuxInt = log2(c - 2)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULQconst [c] x)
@@ -21848,8 +20473,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v0.AuxInt = log2(c - 4)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULQconst [c] x)
@@ -21865,8 +20489,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v0.AuxInt = log2(c - 8)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(x)
+ v.AddArg2(v0, x)
return true
}
// match: (MULQconst [c] x)
@@ -21881,8 +20504,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v.reset(OpAMD64SHLQconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21898,8 +20520,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v.reset(OpAMD64SHLQconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21915,8 +20536,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
v.reset(OpAMD64SHLQconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -21974,9 +20594,7 @@ func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
v.reset(OpAMD64MULSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -22008,9 +20626,7 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
v.reset(OpAMD64MULSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -22033,9 +20649,7 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
v.reset(OpAMD64MULSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
@@ -22048,16 +20662,14 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64MULSD)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -22085,9 +20697,7 @@ func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
v.reset(OpAMD64MULSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -22119,9 +20729,7 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
v.reset(OpAMD64MULSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -22144,9 +20752,7 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
v.reset(OpAMD64MULSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
@@ -22159,16 +20765,14 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64MULSS)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -22182,9 +20786,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
break
}
x := v_0.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (NEGL s:(SUBL x y))
@@ -22201,8 +20803,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
break
}
v.reset(OpAMD64SUBL)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
// match: (NEGL (MOVLconst [c]))
@@ -22227,9 +20828,7 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
break
}
x := v_0.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (NEGQ s:(SUBQ x y))
@@ -22246,8 +20845,7 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
break
}
v.reset(OpAMD64SUBQ)
- v.AddArg(y)
- v.AddArg(x)
+ v.AddArg2(y, x)
return true
}
// match: (NEGQ (MOVQconst [c]))
@@ -22333,8 +20931,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
x := v_1
v.reset(OpAMD64BTSL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -22493,8 +21090,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64ROLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22544,8 +21140,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64ROLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22595,8 +21190,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64RORL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22646,8 +21240,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64RORL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22711,8 +21304,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64ROLW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22776,8 +21368,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64ROLW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22818,8 +21409,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64RORW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -22859,8 +21449,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64RORW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -22923,8 +21512,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64ROLB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -22988,8 +21576,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64ROLB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -23030,8 +21617,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64RORB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -23071,8 +21657,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
continue
}
v.reset(OpAMD64RORB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -23084,9 +21669,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
@@ -23120,12 +21703,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
break
@@ -23161,12 +21742,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
break
@@ -23220,18 +21799,15 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(mem)
+ v2.AddArg2(p, mem)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -23275,13 +21851,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -23326,13 +21899,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -23395,19 +21965,15 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
+ v2.AddArg3(p, idx, mem)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -23446,14 +22012,12 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = 8
v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(mem)
+ v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
}
@@ -23498,13 +22062,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(mem)
+ v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
}
@@ -23559,8 +22121,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
@@ -23568,12 +22129,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
- v3.AddArg(p)
- v3.AddArg(mem)
+ v3.AddArg2(p, mem)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -23617,15 +22176,12 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = 8
v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(idx)
- v1.AddArg(mem)
+ v1.AddArg3(p, idx, mem)
v0.AddArg(v1)
return true
}
@@ -23679,14 +22235,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(idx)
- v1.AddArg(mem)
+ v1.AddArg3(p, idx, mem)
v0.AddArg(v1)
return true
}
@@ -23750,8 +22303,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
@@ -23759,13 +22311,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
- v3.AddArg(p)
- v3.AddArg(idx)
- v3.AddArg(mem)
+ v3.AddArg3(p, idx, mem)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -23793,9 +22342,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
v.reset(OpAMD64ORLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -23855,9 +22402,7 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORLconst [c] _)
@@ -23907,8 +22452,7 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
v.reset(OpAMD64ORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -23930,8 +22474,7 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
v.reset(OpAMD64ORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -23961,9 +22504,7 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
v.reset(OpAMD64ORLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -23986,9 +22527,7 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
v.reset(OpAMD64ORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
@@ -24001,16 +22540,14 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ORL)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -24038,9 +22575,7 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
v.reset(OpAMD64ORLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -24063,9 +22598,7 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
v.reset(OpAMD64ORLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -24089,8 +22622,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
x := v_1
v.reset(OpAMD64BTSQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -24135,6 +22667,22 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
break
}
+ // match: (ORQ x (MOVLconst [c]))
+ // result: (ORQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := v_1.AuxInt
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
// cond: d==64-c
// result: (ROLQconst x [c])
@@ -24203,8 +22751,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
continue
}
v.reset(OpAMD64ROLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -24254,8 +22801,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
continue
}
v.reset(OpAMD64ROLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -24305,8 +22851,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
continue
}
v.reset(OpAMD64RORQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
@@ -24356,13 +22901,30 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
continue
}
v.reset(OpAMD64RORQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
}
break
}
+ // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
+ // result: (MOVQconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := v_0.AuxInt
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ d := v_1.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = c | d
+ return true
+ }
+ break
+ }
// match: (ORQ x x)
// result: x
for {
@@ -24370,9 +22932,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
@@ -24406,12 +22966,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
break
@@ -24447,12 +23005,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
break
@@ -24488,12 +23044,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
+ v0.AddArg2(p, mem)
return true
}
break
@@ -24547,18 +23101,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(mem)
+ v2.AddArg2(p, mem)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -24613,18 +23164,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(mem)
+ v2.AddArg2(p, mem)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -24668,13 +23216,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -24719,13 +23264,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -24770,13 +23312,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = i0
v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
+ v0.AddArg3(p, idx, mem)
return true
}
}
@@ -24839,19 +23378,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
+ v2.AddArg3(p, idx, mem)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -24915,19 +23450,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
+ v2.AddArg3(p, idx, mem)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -24966,14 +23497,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = 8
v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(mem)
+ v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
}
@@ -25018,13 +23547,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(mem)
+ v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
}
@@ -25069,13 +23596,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(mem)
+ v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
}
@@ -25130,8 +23655,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
@@ -25139,12 +23663,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
- v3.AddArg(p)
- v3.AddArg(mem)
+ v3.AddArg2(p, mem)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -25207,20 +23729,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
- v3.AddArg(p)
- v3.AddArg(mem)
+ v3.AddArg2(p, mem)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -25264,15 +23783,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = 8
v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(idx)
- v1.AddArg(mem)
+ v1.AddArg3(p, idx, mem)
v0.AddArg(v1)
return true
}
@@ -25326,14 +23842,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(idx)
- v1.AddArg(mem)
+ v1.AddArg3(p, idx, mem)
v0.AddArg(v1)
return true
}
@@ -25387,14 +23900,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
- v1.AddArg(p)
- v1.AddArg(idx)
- v1.AddArg(mem)
+ v1.AddArg3(p, idx, mem)
v0.AddArg(v1)
return true
}
@@ -25458,8 +23968,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
@@ -25467,13 +23976,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
- v3.AddArg(p)
- v3.AddArg(idx)
- v3.AddArg(mem)
+ v3.AddArg3(p, idx, mem)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -25545,21 +24051,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
b = mergePoint(b, x0, x1, y)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
- v3.AddArg(p)
- v3.AddArg(idx)
- v3.AddArg(mem)
+ v3.AddArg3(p, idx, mem)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
+ v0.AddArg2(v1, y)
return true
}
}
@@ -25587,9 +24089,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
v.reset(OpAMD64ORQload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -25647,9 +24147,7 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORQconst [-1] _)
@@ -25697,8 +24195,7 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
v.reset(OpAMD64ORQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -25720,8 +24217,7 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
v.reset(OpAMD64ORQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -25751,9 +24247,7 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
v.reset(OpAMD64ORQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -25776,9 +24270,7 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
v.reset(OpAMD64ORQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
@@ -25791,16 +24283,14 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64ORQ)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -25828,9 +24318,7 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
v.reset(OpAMD64ORQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -25853,9 +24341,7 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
v.reset(OpAMD64ORQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -25872,8 +24358,7 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLB x (NEGL y))
@@ -25885,8 +24370,7 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLB x (MOVQconst [c]))
@@ -25940,9 +24424,7 @@ func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -25959,8 +24441,7 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLL x (NEGL y))
@@ -25972,8 +24453,7 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLL x (MOVQconst [c]))
@@ -26027,9 +24507,7 @@ func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -26046,8 +24524,7 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLQ x (NEGL y))
@@ -26059,8 +24536,7 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLQ x (MOVQconst [c]))
@@ -26114,9 +24590,7 @@ func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -26133,8 +24607,7 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLW x (NEGL y))
@@ -26146,8 +24619,7 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64RORW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (ROLW x (MOVQconst [c]))
@@ -26201,9 +24673,7 @@ func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -26220,8 +24690,7 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORB x (NEGL y))
@@ -26233,8 +24702,7 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORB x (MOVQconst [c]))
@@ -26277,8 +24745,7 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORL x (NEGL y))
@@ -26290,8 +24757,7 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORL x (MOVQconst [c]))
@@ -26334,8 +24800,7 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORQ x (NEGL y))
@@ -26347,8 +24812,7 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORQ x (MOVQconst [c]))
@@ -26391,8 +24855,7 @@ func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORW x (NEGL y))
@@ -26404,8 +24867,7 @@ func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpAMD64ROLW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RORW x (MOVQconst [c]))
@@ -26476,9 +24938,7 @@ func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARBconst [c] (MOVQconst [d]))
@@ -26539,8 +24999,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARL x (NEGQ (ADDQconst [c] y)))
@@ -26562,10 +25021,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SARL x (ANDQconst [c] y))
@@ -26582,8 +25040,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARL x (NEGQ (ANDQconst [c] y)))
@@ -26605,10 +25062,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SARL x (ADDLconst [c] y))
@@ -26625,8 +25081,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARL x (NEGL (ADDLconst [c] y)))
@@ -26648,10 +25103,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SARL x (ANDLconst [c] y))
@@ -26668,8 +25122,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARL x (NEGL (ANDLconst [c] y)))
@@ -26691,10 +25144,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -26708,9 +25160,7 @@ func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARLconst [c] (MOVQconst [d]))
@@ -26771,8 +25221,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARQ x (NEGQ (ADDQconst [c] y)))
@@ -26794,10 +25243,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SARQ x (ANDQconst [c] y))
@@ -26814,8 +25262,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARQ x (NEGQ (ANDQconst [c] y)))
@@ -26837,10 +25284,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SARQ x (ADDLconst [c] y))
@@ -26857,8 +25303,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARQ x (NEGL (ADDLconst [c] y)))
@@ -26880,10 +25325,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SARQ x (ANDLconst [c] y))
@@ -26900,8 +25344,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SARQ x (NEGL (ANDLconst [c] y)))
@@ -26923,10 +25366,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -26940,9 +25382,7 @@ func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARQconst [c] (MOVQconst [d]))
@@ -26999,9 +25439,7 @@ func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SARWconst [c] (MOVQconst [d]))
@@ -27091,8 +25529,7 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
}
v.reset(OpAMD64SBBQconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(borrow)
+ v.AddArg2(x, borrow)
return true
}
// match: (SBBQ x y (FlagEQ))
@@ -27104,8 +25541,7 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
break
}
v.reset(OpAMD64SUBQborrow)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -27388,9 +25824,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64SETBEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -27412,9 +25846,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -27437,9 +25869,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
@@ -27455,11 +25885,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -27475,11 +25903,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -27495,11 +25921,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -27515,11 +25939,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -27535,11 +25957,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -27564,9 +25984,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -27588,9 +26006,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64SETAstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -27613,9 +26029,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64SETAstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
@@ -27631,11 +26045,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -27651,11 +26063,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -27671,11 +26081,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -27691,11 +26099,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -27711,11 +26117,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -27926,9 +26330,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -27950,9 +26352,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64SETBEstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -27975,9 +26375,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64SETBEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
@@ -27993,11 +26391,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -28013,11 +26409,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -28033,11 +26427,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -28053,11 +26445,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -28073,11 +26463,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -28102,9 +26490,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64SETAstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -28126,9 +26512,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -28151,9 +26535,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
@@ -28169,11 +26551,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -28189,11 +26569,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -28209,11 +26587,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -28229,11 +26605,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -28249,11 +26623,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -28282,8 +26654,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
y := v_0_1
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -28310,8 +26681,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
y := v_0_1
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -28702,12 +27072,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -28738,12 +27105,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -28767,12 +27131,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2uint32(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
@@ -28794,12 +27156,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
@@ -28828,12 +27188,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -28855,12 +27213,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(s)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
@@ -28880,12 +27236,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(s)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
@@ -28919,12 +27273,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -28960,12 +27312,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -29001,12 +27351,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -29042,12 +27390,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -29079,12 +27425,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -29116,12 +27460,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -29140,9 +27482,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -29164,9 +27504,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETEQstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -29189,9 +27527,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64SETEQstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
@@ -29207,11 +27543,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -29227,11 +27561,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -29247,11 +27579,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -29267,11 +27597,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -29287,11 +27615,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -29446,9 +27772,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64SETLEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -29470,9 +27794,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64SETGEstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -29495,9 +27817,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64SETGEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
@@ -29513,11 +27833,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -29533,11 +27851,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -29553,11 +27869,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -29573,11 +27887,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -29593,11 +27905,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -29622,9 +27932,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64SETLstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -29646,9 +27954,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64SETGstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -29671,9 +27977,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64SETGstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
@@ -29689,11 +27993,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -29709,11 +28011,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -29729,11 +28029,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -29749,11 +28047,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -29769,11 +28065,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -29928,9 +28222,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64SETGEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -29952,9 +28244,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64SETLEstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -29977,9 +28267,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64SETLEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
@@ -29995,11 +28283,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -30015,11 +28301,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -30035,11 +28319,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -30055,11 +28337,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -30075,11 +28355,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -30104,9 +28382,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64SETGstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -30128,9 +28404,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64SETLstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -30153,9 +28427,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64SETLstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
@@ -30171,11 +28443,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -30191,11 +28461,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -30211,11 +28479,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -30231,11 +28497,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -30251,11 +28515,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -30284,8 +28546,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
y := v_0_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -30312,8 +28573,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
y := v_0_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -30704,12 +28964,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -30740,12 +28997,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -30769,12 +29023,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2uint32(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
@@ -30796,12 +29048,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
@@ -30830,12 +29080,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -30857,12 +29105,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(s)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
@@ -30882,12 +29128,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(s)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
@@ -30921,12 +29165,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -30962,12 +29204,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -31003,12 +29243,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -31044,12 +29282,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -31081,12 +29317,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -31118,12 +29352,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
break
@@ -31142,9 +29374,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
@@ -31166,9 +29396,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETNEstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -31191,9 +29419,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64SETNEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
@@ -31209,11 +29435,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
@@ -31229,11 +29453,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
@@ -31249,11 +29471,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
@@ -31269,11 +29489,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
@@ -31289,11 +29507,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(ptr, v0, mem)
return true
}
return false
@@ -31342,8 +29558,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLL x (NEGQ (ADDQconst [c] y)))
@@ -31365,10 +29580,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHLL x (ANDQconst [c] y))
@@ -31385,8 +29599,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLL x (NEGQ (ANDQconst [c] y)))
@@ -31408,10 +29621,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHLL x (ADDLconst [c] y))
@@ -31428,8 +29640,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLL x (NEGL (ADDLconst [c] y)))
@@ -31451,10 +29662,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHLL x (ANDLconst [c] y))
@@ -31471,8 +29681,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLL x (NEGL (ANDLconst [c] y)))
@@ -31494,10 +29703,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -31523,9 +29731,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SHLLconst [d] (MOVLconst [c]))
@@ -31586,8 +29792,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLQ x (NEGQ (ADDQconst [c] y)))
@@ -31609,10 +29814,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHLQ x (ANDQconst [c] y))
@@ -31629,8 +29833,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLQ x (NEGQ (ANDQconst [c] y)))
@@ -31652,10 +29855,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHLQ x (ADDLconst [c] y))
@@ -31672,8 +29874,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLQ x (NEGL (ADDLconst [c] y)))
@@ -31695,10 +29896,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHLQ x (ANDLconst [c] y))
@@ -31715,8 +29915,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHLQ x (NEGL (ANDLconst [c] y)))
@@ -31738,10 +29937,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -31767,9 +29965,7 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SHLQconst [d] (MOVQconst [c]))
@@ -31784,6 +29980,18 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
v.AuxInt = c << uint64(d)
return true
}
+ // match: (SHLQconst [d] (MOVLconst [c]))
+ // result: (MOVQconst [int64(int32(c)) << uint64(d)])
+ for {
+ d := v.AuxInt
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64(int32(c)) << uint64(d)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
@@ -31864,9 +30072,7 @@ func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -31915,8 +30121,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRL x (NEGQ (ADDQconst [c] y)))
@@ -31938,10 +30143,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHRL x (ANDQconst [c] y))
@@ -31958,8 +30162,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRL x (NEGQ (ANDQconst [c] y)))
@@ -31981,10 +30184,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHRL x (ADDLconst [c] y))
@@ -32001,8 +30203,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRL x (NEGL (ADDLconst [c] y)))
@@ -32024,10 +30225,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHRL x (ANDLconst [c] y))
@@ -32044,8 +30244,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRL x (NEGL (ANDLconst [c] y)))
@@ -32067,10 +30266,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -32096,9 +30294,7 @@ func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -32147,8 +30343,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRQ x (NEGQ (ADDQconst [c] y)))
@@ -32170,10 +30365,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHRQ x (ANDQconst [c] y))
@@ -32190,8 +30384,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRQ x (NEGQ (ANDQconst [c] y)))
@@ -32213,10 +30406,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHRQ x (ADDLconst [c] y))
@@ -32233,8 +30425,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRQ x (NEGL (ADDLconst [c] y)))
@@ -32256,10 +30447,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
// match: (SHRQ x (ANDLconst [c] y))
@@ -32276,8 +30466,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SHRQ x (NEGL (ANDLconst [c] y)))
@@ -32299,10 +30488,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -32328,9 +30516,7 @@ func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -32413,9 +30599,7 @@ func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -32482,9 +30666,7 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
v.reset(OpAMD64SUBLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -32500,9 +30682,7 @@ func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SUBLconst [c] x)
@@ -32541,9 +30721,7 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
v.reset(OpAMD64SUBLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -32566,9 +30744,7 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
v.reset(OpAMD64SUBLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
@@ -32581,16 +30757,14 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64SUBL)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -32618,9 +30792,7 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
v.reset(OpAMD64SUBLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -32643,9 +30815,7 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
v.reset(OpAMD64SUBLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -32720,9 +30890,7 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
v.reset(OpAMD64SUBQload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -32758,9 +30926,7 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SUBQconst [c] x)
@@ -32834,9 +31000,7 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
v.reset(OpAMD64SUBQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -32859,9 +31023,7 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
v.reset(OpAMD64SUBQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
@@ -32874,16 +31036,14 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64SUBQ)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -32911,9 +31071,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
v.reset(OpAMD64SUBQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -32936,9 +31094,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
v.reset(OpAMD64SUBQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -32965,9 +31121,7 @@ func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
v.reset(OpAMD64SUBSDload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -32997,9 +31151,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
v.reset(OpAMD64SUBSDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -33022,9 +31174,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
v.reset(OpAMD64SUBSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
@@ -33037,16 +31187,14 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64SUBSD)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -33073,9 +31221,7 @@ func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
v.reset(OpAMD64SUBSSload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
return false
@@ -33105,9 +31251,7 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
v.reset(OpAMD64SUBSSload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -33130,9 +31274,7 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
v.reset(OpAMD64SUBSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
@@ -33145,16 +31287,14 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64SUBSS)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -33198,12 +31338,10 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(0, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
break
@@ -33224,8 +31362,7 @@ func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
break
}
v.reset(OpAMD64TESTB)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
return false
@@ -33269,12 +31406,10 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(0, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
break
@@ -33305,8 +31440,7 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
break
}
v.reset(OpAMD64TESTL)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
return false
@@ -33354,12 +31488,10 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(0, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
break
@@ -33390,8 +31522,7 @@ func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
break
}
v.reset(OpAMD64TESTQ)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
return false
@@ -33435,12 +31566,10 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
+ v.copyOf(v0)
v0.AuxInt = makeValAndOff(0, off)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
return true
}
break
@@ -33461,8 +31590,7 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
break
}
v.reset(OpAMD64TESTW)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
return false
@@ -33490,9 +31618,7 @@ func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
v.reset(OpAMD64XADDLlock)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
return false
@@ -33520,9 +31646,7 @@ func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
v.reset(OpAMD64XADDQlock)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
return false
@@ -33550,9 +31674,7 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
v.reset(OpAMD64XCHGL)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
@@ -33575,9 +31697,7 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
v.reset(OpAMD64XCHGL)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
return false
@@ -33605,9 +31725,7 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
v.reset(OpAMD64XCHGQ)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
@@ -33630,9 +31748,7 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
v.reset(OpAMD64XCHGQ)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
return false
@@ -33654,8 +31770,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
}
x := v_1
v.reset(OpAMD64BTCL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -33801,9 +31916,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
v.reset(OpAMD64XORLload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -33973,9 +32086,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (XORLconst [c] (MOVLconst [d]))
@@ -34013,8 +32124,7 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
v.reset(OpAMD64XORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -34036,8 +32146,7 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
v.reset(OpAMD64XORLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -34067,9 +32176,7 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
v.reset(OpAMD64XORLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -34092,9 +32199,7 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
v.reset(OpAMD64XORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
@@ -34107,16 +32212,14 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64XORL)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -34144,9 +32247,7 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
v.reset(OpAMD64XORLmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -34169,9 +32270,7 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
v.reset(OpAMD64XORLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -34193,8 +32292,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
}
x := v_1
v.reset(OpAMD64BTCQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -34294,9 +32392,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
v.reset(OpAMD64XORQload)
v.AuxInt = off
v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(x, ptr, mem)
return true
}
break
@@ -34354,9 +32450,7 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (XORQconst [c] (MOVQconst [d]))
@@ -34394,8 +32488,7 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
v.reset(OpAMD64XORQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
@@ -34417,8 +32510,7 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
v.reset(OpAMD64XORQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg2(base, mem)
return true
}
return false
@@ -34448,9 +32540,7 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
v.reset(OpAMD64XORQload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
@@ -34473,9 +32563,7 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
v.reset(OpAMD64XORQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.AddArg3(val, base, mem)
return true
}
// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
@@ -34488,16 +32576,14 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
break
}
- _ = v_2.Args[2]
+ y := v_2.Args[1]
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
v.reset(OpAMD64XORQ)
- v.AddArg(x)
v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
return false
@@ -34525,9 +32611,7 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
v.reset(OpAMD64XORQmodify)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
@@ -34550,9 +32634,7 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
v.reset(OpAMD64XORQmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -34570,12 +32652,9 @@ func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
val := v_1
mem := v_2
v.reset(OpAMD64AddTupleFirst32)
- v.AddArg(val)
v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
return true
}
}
@@ -34592,12 +32671,9 @@ func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
val := v_1
mem := v_2
v.reset(OpAMD64AddTupleFirst64)
- v.AddArg(val)
v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
return true
}
}
@@ -34612,9 +32688,7 @@ func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
val := v_1
mem := v_2
v.reset(OpAMD64XCHGL)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
}
@@ -34629,9 +32703,7 @@ func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
val := v_1
mem := v_2
v.reset(OpAMD64XCHGQ)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg3(val, ptr, mem)
return true
}
}
@@ -34649,9 +32721,7 @@ func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
mem := v_2
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg3(val, ptr, mem)
v.AddArg(v0)
return true
}
@@ -34670,9 +32740,7 @@ func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
mem := v_2
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg3(val, ptr, mem)
v.AddArg(v0)
return true
}
@@ -34691,9 +32759,7 @@ func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
mem := v_2
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg3(val, ptr, mem)
v.AddArg(v0)
return true
}
@@ -34712,9 +32778,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
mem := v_2
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg3(val, ptr, mem)
v.AddArg(v0)
return true
}
@@ -34732,10 +32796,9 @@ func rewriteValueAMD64_OpBitLen16(v *Value) bool {
v0.AuxInt = 1
v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
v2.AddArg(x)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -34754,10 +32817,9 @@ func rewriteValueAMD64_OpBitLen32(v *Value) bool {
v1.AuxInt = 1
v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
v2.AddArg(x)
- v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
v3.AddArg(x)
- v1.AddArg(v3)
+ v1.AddArg2(v2, v3)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -34779,15 +32841,13 @@ func rewriteValueAMD64_OpBitLen64(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v2.AddArg(x)
v1.AddArg(v2)
- v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
v3.AuxInt = -1
- v0.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v5.AddArg(x)
v4.AddArg(v5)
- v0.AddArg(v4)
+ v0.AddArg3(v1, v3, v4)
v.AddArg(v0)
return true
}
@@ -34805,10 +32865,9 @@ func rewriteValueAMD64_OpBitLen8(v *Value) bool {
v0.AuxInt = 1
v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
v2.AddArg(x)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -34846,9 +32905,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQEQ)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETNE cond))
@@ -34866,9 +32923,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQNE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETL cond))
@@ -34886,9 +32941,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQLT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETG cond))
@@ -34906,9 +32959,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQGT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETLE cond))
@@ -34926,9 +32977,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQLE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGE cond))
@@ -34946,9 +32995,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQGE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETA cond))
@@ -34966,9 +33013,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQHI)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETB cond))
@@ -34986,9 +33031,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQCS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETAE cond))
@@ -35006,9 +33049,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQCC)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETBE cond))
@@ -35026,9 +33067,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQLS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETEQF cond))
@@ -35046,9 +33085,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQEQF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETNEF cond))
@@ -35066,9 +33103,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQNEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGF cond))
@@ -35086,9 +33121,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQGTF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGEF cond))
@@ -35106,9 +33139,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQGEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETEQ cond))
@@ -35126,9 +33157,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLEQ)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETNE cond))
@@ -35146,9 +33175,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLNE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETL cond))
@@ -35166,9 +33193,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLLT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETG cond))
@@ -35186,9 +33211,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLGT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETLE cond))
@@ -35206,9 +33229,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLLE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGE cond))
@@ -35226,9 +33247,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLGE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETA cond))
@@ -35246,9 +33265,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLHI)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETB cond))
@@ -35266,9 +33283,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLCS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETAE cond))
@@ -35286,9 +33301,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLCC)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETBE cond))
@@ -35306,9 +33319,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLLS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETEQF cond))
@@ -35326,9 +33337,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLEQF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETNEF cond))
@@ -35346,9 +33355,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLNEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGF cond))
@@ -35366,9 +33373,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLGTF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGEF cond))
@@ -35386,9 +33391,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLGEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETEQ cond))
@@ -35406,9 +33409,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWEQ)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETNE cond))
@@ -35426,9 +33427,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWNE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETL cond))
@@ -35446,9 +33445,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWLT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETG cond))
@@ -35466,9 +33463,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWGT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETLE cond))
@@ -35486,9 +33481,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWLE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGE cond))
@@ -35506,9 +33499,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWGE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETA cond))
@@ -35526,9 +33517,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWHI)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETB cond))
@@ -35546,9 +33535,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWCS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETAE cond))
@@ -35566,9 +33553,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWCC)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETBE cond))
@@ -35586,9 +33571,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWLS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETEQF cond))
@@ -35606,9 +33589,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWEQF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETNEF cond))
@@ -35626,9 +33607,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWNEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGF cond))
@@ -35646,9 +33625,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWGTF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y (SETGEF cond))
@@ -35666,9 +33643,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWGEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg3(y, x, cond)
return true
}
// match: (CondSelect x y check)
@@ -35684,11 +33659,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
}
v.reset(OpCondSelect)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
v0.AddArg(check)
- v.AddArg(v0)
+ v.AddArg3(x, y, v0)
return true
}
// match: (CondSelect x y check)
@@ -35704,11 +33677,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
}
v.reset(OpCondSelect)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
v0.AddArg(check)
- v.AddArg(v0)
+ v.AddArg3(x, y, v0)
return true
}
// match: (CondSelect x y check)
@@ -35724,11 +33695,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
}
v.reset(OpCondSelect)
v.Type = t
- v.AddArg(x)
- v.AddArg(y)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
v0.AddArg(check)
- v.AddArg(v0)
+ v.AddArg3(x, y, v0)
return true
}
// match: (CondSelect x y check)
@@ -35743,12 +33712,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVQNE)
- v.AddArg(y)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(check)
- v.AddArg(v0)
+ v.AddArg3(y, x, v0)
return true
}
// match: (CondSelect x y check)
@@ -35763,12 +33730,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVLNE)
- v.AddArg(y)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(check)
- v.AddArg(v0)
+ v.AddArg3(y, x, v0)
return true
}
// match: (CondSelect x y check)
@@ -35783,12 +33748,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpAMD64CMOVWNE)
- v.AddArg(y)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(check)
- v.AddArg(v0)
+ v.AddArg3(y, x, v0)
return true
}
return false
@@ -35841,15 +33804,13 @@ func rewriteValueAMD64_OpCtz64(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v1.AddArg(x)
v0.AddArg(v1)
- v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
v2.AuxInt = 64
- v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v4.AddArg(x)
v3.AddArg(v4)
- v.AddArg(v3)
+ v.AddArg3(v0, v2, v3)
return true
}
}
@@ -35898,8 +33859,7 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool {
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AuxInt = a
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -35916,8 +33876,7 @@ func rewriteValueAMD64_OpDiv16u(v *Value) bool {
y := v_1
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -35936,8 +33895,7 @@ func rewriteValueAMD64_OpDiv32(v *Value) bool {
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AuxInt = a
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -35954,8 +33912,7 @@ func rewriteValueAMD64_OpDiv32u(v *Value) bool {
y := v_1
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -35974,8 +33931,7 @@ func rewriteValueAMD64_OpDiv64(v *Value) bool {
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AuxInt = a
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -35992,8 +33948,7 @@ func rewriteValueAMD64_OpDiv64u(v *Value) bool {
y := v_1
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36012,10 +33967,9 @@ func rewriteValueAMD64_OpDiv8(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -36034,10 +33988,9 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -36053,8 +34006,7 @@ func rewriteValueAMD64_OpEq16(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36070,8 +34022,7 @@ func rewriteValueAMD64_OpEq32(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36087,8 +34038,7 @@ func rewriteValueAMD64_OpEq32F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36104,8 +34054,7 @@ func rewriteValueAMD64_OpEq64(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36121,8 +34070,7 @@ func rewriteValueAMD64_OpEq64F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36138,8 +34086,7 @@ func rewriteValueAMD64_OpEq8(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36155,8 +34102,7 @@ func rewriteValueAMD64_OpEqB(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36172,8 +34118,7 @@ func rewriteValueAMD64_OpEqPtr(v *Value) bool {
y := v_1
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36189,9 +34134,7 @@ func rewriteValueAMD64_OpFMA(v *Value) bool {
y := v_1
z := v_2
v.reset(OpAMD64VFMADD231SD)
- v.AddArg(z)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(z, x, y)
return true
}
}
@@ -36218,8 +34161,7 @@ func rewriteValueAMD64_OpGeq32F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGEF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36235,8 +34177,7 @@ func rewriteValueAMD64_OpGeq64F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGEF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36252,8 +34193,7 @@ func rewriteValueAMD64_OpGreater32F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36269,8 +34209,7 @@ func rewriteValueAMD64_OpGreater64F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36286,8 +34225,7 @@ func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
len := v_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
+ v0.AddArg2(idx, len)
v.AddArg(v0)
return true
}
@@ -36301,8 +34239,7 @@ func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
p := v_0
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
+ v0.AddArg2(p, p)
v.AddArg(v0)
return true
}
@@ -36318,8 +34255,7 @@ func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
len := v_1
v.reset(OpAMD64SETBE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
+ v0.AddArg2(idx, len)
v.AddArg(v0)
return true
}
@@ -36335,8 +34271,7 @@ func rewriteValueAMD64_OpLeq16(v *Value) bool {
y := v_1
v.reset(OpAMD64SETLE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36352,8 +34287,7 @@ func rewriteValueAMD64_OpLeq16U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETBE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36369,8 +34303,7 @@ func rewriteValueAMD64_OpLeq32(v *Value) bool {
y := v_1
v.reset(OpAMD64SETLE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36386,8 +34319,7 @@ func rewriteValueAMD64_OpLeq32F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGEF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -36403,8 +34335,7 @@ func rewriteValueAMD64_OpLeq32U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETBE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36420,8 +34351,7 @@ func rewriteValueAMD64_OpLeq64(v *Value) bool {
y := v_1
v.reset(OpAMD64SETLE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36437,8 +34367,7 @@ func rewriteValueAMD64_OpLeq64F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGEF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -36454,8 +34383,7 @@ func rewriteValueAMD64_OpLeq64U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETBE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36471,8 +34399,7 @@ func rewriteValueAMD64_OpLeq8(v *Value) bool {
y := v_1
v.reset(OpAMD64SETLE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36488,8 +34415,7 @@ func rewriteValueAMD64_OpLeq8U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETBE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36505,8 +34431,7 @@ func rewriteValueAMD64_OpLess16(v *Value) bool {
y := v_1
v.reset(OpAMD64SETL)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36522,8 +34447,7 @@ func rewriteValueAMD64_OpLess16U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36539,8 +34463,7 @@ func rewriteValueAMD64_OpLess32(v *Value) bool {
y := v_1
v.reset(OpAMD64SETL)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36556,8 +34479,7 @@ func rewriteValueAMD64_OpLess32F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -36573,8 +34495,7 @@ func rewriteValueAMD64_OpLess32U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36590,8 +34511,7 @@ func rewriteValueAMD64_OpLess64(v *Value) bool {
y := v_1
v.reset(OpAMD64SETL)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36607,8 +34527,7 @@ func rewriteValueAMD64_OpLess64F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETGF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -36624,8 +34543,7 @@ func rewriteValueAMD64_OpLess64U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36641,8 +34559,7 @@ func rewriteValueAMD64_OpLess8(v *Value) bool {
y := v_1
v.reset(OpAMD64SETL)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36658,8 +34575,7 @@ func rewriteValueAMD64_OpLess8U(v *Value) bool {
y := v_1
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -36678,8 +34594,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool {
break
}
v.reset(OpAMD64MOVQload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -36693,8 +34608,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool {
break
}
v.reset(OpAMD64MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -36708,8 +34622,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool {
break
}
v.reset(OpAMD64MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -36723,8 +34636,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool {
break
}
v.reset(OpAMD64MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -36738,8 +34650,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool {
break
}
v.reset(OpAMD64MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -36753,8 +34664,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool {
break
}
v.reset(OpAMD64MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -36788,15 +34698,13 @@ func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x16 x y)
@@ -36809,8 +34717,7 @@ func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -36831,15 +34738,13 @@ func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x32 x y)
@@ -36852,8 +34757,7 @@ func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -36874,15 +34778,13 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x64 x y)
@@ -36895,8 +34797,7 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -36917,15 +34818,13 @@ func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh16x8 x y)
@@ -36938,8 +34837,7 @@ func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -36960,15 +34858,13 @@ func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x16 x y)
@@ -36981,8 +34877,7 @@ func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37003,15 +34898,13 @@ func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x32 x y)
@@ -37024,8 +34917,7 @@ func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37046,15 +34938,13 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x64 x y)
@@ -37067,8 +34957,7 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37089,15 +34978,13 @@ func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh32x8 x y)
@@ -37110,8 +34997,7 @@ func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37132,15 +35018,13 @@ func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh64x16 x y)
@@ -37153,8 +35037,7 @@ func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37175,15 +35058,13 @@ func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh64x32 x y)
@@ -37196,8 +35077,7 @@ func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37218,15 +35098,13 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh64x64 x y)
@@ -37239,8 +35117,7 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37261,15 +35138,13 @@ func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh64x8 x y)
@@ -37282,8 +35157,7 @@ func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
break
}
v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37304,15 +35178,13 @@ func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x16 x y)
@@ -37325,8 +35197,7 @@ func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37347,15 +35218,13 @@ func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x32 x y)
@@ -37368,8 +35237,7 @@ func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37390,15 +35258,13 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x64 x y)
@@ -37411,8 +35277,7 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37433,15 +35298,13 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Lsh8x8 x y)
@@ -37454,8 +35317,7 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
break
}
v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -37474,8 +35336,7 @@ func rewriteValueAMD64_OpMod16(v *Value) bool {
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AuxInt = a
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -37492,8 +35353,7 @@ func rewriteValueAMD64_OpMod16u(v *Value) bool {
y := v_1
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -37512,8 +35372,7 @@ func rewriteValueAMD64_OpMod32(v *Value) bool {
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AuxInt = a
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -37530,8 +35389,7 @@ func rewriteValueAMD64_OpMod32u(v *Value) bool {
y := v_1
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -37550,8 +35408,7 @@ func rewriteValueAMD64_OpMod64(v *Value) bool {
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AuxInt = a
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -37568,8 +35425,7 @@ func rewriteValueAMD64_OpMod64u(v *Value) bool {
y := v_1
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -37588,10 +35444,9 @@ func rewriteValueAMD64_OpMod8(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -37610,10 +35465,9 @@ func rewriteValueAMD64_OpMod8u(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -37632,9 +35486,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
break
}
mem := v_2
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
+ v.copyOf(mem)
return true
}
// match: (Move [1] dst src mem)
@@ -37647,12 +35499,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(OpAMD64MOVBstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [2] dst src mem)
@@ -37665,12 +35514,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(OpAMD64MOVWstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [4] dst src mem)
@@ -37683,12 +35529,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(OpAMD64MOVLstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [8] dst src mem)
@@ -37701,12 +35544,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
src := v_1
mem := v_2
v.reset(OpAMD64MOVQstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [16] dst src mem)
@@ -37723,12 +35563,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
break
}
v.reset(OpAMD64MOVOstore)
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
return true
}
// match: (Move [16] dst src mem)
@@ -37746,20 +35583,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = 8
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [32] dst src mem)
@@ -37776,17 +35607,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = 16
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = 16
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
v2.AuxInt = 16
- v2.AddArg(dst)
- v2.AddArg(src)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [48] dst src mem)
@@ -37807,17 +35634,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = 16
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = 16
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
v2.AuxInt = 16
- v2.AddArg(dst)
- v2.AddArg(src)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [64] dst src mem)
@@ -37838,17 +35661,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = 32
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = 32
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
v2.AuxInt = 32
- v2.AddArg(dst)
- v2.AddArg(src)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [3] dst src mem)
@@ -37862,20 +35681,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVBstore)
v.AuxInt = 2
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [5] dst src mem)
@@ -37889,20 +35702,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVBstore)
v.AuxInt = 4
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [6] dst src mem)
@@ -37916,20 +35723,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVWstore)
v.AuxInt = 4
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [7] dst src mem)
@@ -37943,20 +35744,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVLstore)
v.AuxInt = 3
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [9] dst src mem)
@@ -37970,20 +35765,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVBstore)
v.AuxInt = 8
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [10] dst src mem)
@@ -37997,20 +35786,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVWstore)
v.AuxInt = 8
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [12] dst src mem)
@@ -38024,20 +35807,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v_2
v.reset(OpAMD64MOVLstore)
v.AuxInt = 8
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [s] dst src mem)
@@ -38053,20 +35830,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = s - 8
- v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = s - 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [s] dst src mem)
@@ -38085,19 +35856,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = s % 16
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = s % 16
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
@@ -38116,19 +35882,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = s % 16
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = s % 16
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
- v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
@@ -38147,28 +35908,20 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = s % 16
v0.AddArg(dst)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = s % 16
v1.AddArg(src)
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2.AuxInt = 8
- v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v3.AuxInt = 8
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
+ v3.AddArg2(src, mem)
v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v4.AddArg(dst)
v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v5.AddArg(src)
- v5.AddArg(mem)
- v4.AddArg(v5)
- v4.AddArg(mem)
- v2.AddArg(v4)
- v.AddArg(v2)
+ v5.AddArg2(src, mem)
+ v4.AddArg3(dst, v5, mem)
+ v2.AddArg3(dst, v3, v4)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
@@ -38184,9 +35937,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
}
v.reset(OpAMD64DUFFCOPY)
v.AuxInt = 14 * (64 - s/16)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
+ v.AddArg3(dst, src, mem)
return true
}
// match: (Move [s] dst src mem)
@@ -38201,12 +35952,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
break
}
v.reset(OpAMD64REPMOVSQ)
- v.AddArg(dst)
- v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = s / 8
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg4(dst, src, v0, mem)
return true
}
return false
@@ -38220,10 +35968,9 @@ func rewriteValueAMD64_OpNeg32F(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64PXOR)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
}
@@ -38236,10 +35983,9 @@ func rewriteValueAMD64_OpNeg64F(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64PXOR)
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
- v.AddArg(v0)
+ v.AddArg2(x, v0)
return true
}
}
@@ -38254,8 +36000,7 @@ func rewriteValueAMD64_OpNeq16(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38271,8 +36016,7 @@ func rewriteValueAMD64_OpNeq32(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38288,8 +36032,7 @@ func rewriteValueAMD64_OpNeq32F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNEF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38305,8 +36048,7 @@ func rewriteValueAMD64_OpNeq64(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38322,8 +36064,7 @@ func rewriteValueAMD64_OpNeq64F(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNEF)
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38339,8 +36080,7 @@ func rewriteValueAMD64_OpNeq8(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38356,8 +36096,7 @@ func rewriteValueAMD64_OpNeqB(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38373,8 +36112,7 @@ func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
y := v_1
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -38417,8 +36155,7 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool {
v.reset(OpAMD64ADDQ)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = off
- v.AddArg(v0)
- v.AddArg(ptr)
+ v.AddArg2(v0, ptr)
return true
}
}
@@ -38439,9 +36176,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
}
v.reset(OpAMD64LoweredPanicBoundsA)
v.AuxInt = kind
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg3(x, y, mem)
return true
}
// match: (PanicBounds [kind] x y mem)
@@ -38457,9 +36192,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
}
v.reset(OpAMD64LoweredPanicBoundsB)
v.AuxInt = kind
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg3(x, y, mem)
return true
}
// match: (PanicBounds [kind] x y mem)
@@ -38475,9 +36208,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
}
v.reset(OpAMD64LoweredPanicBoundsC)
v.AuxInt = kind
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(mem)
+ v.AddArg3(x, y, mem)
return true
}
return false
@@ -38540,15 +36271,13 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux16 x y)
@@ -38561,8 +36290,7 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
break
}
v.reset(OpAMD64SHRW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38583,15 +36311,13 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux32 x y)
@@ -38604,8 +36330,7 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
break
}
v.reset(OpAMD64SHRW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38626,15 +36351,13 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux64 x y)
@@ -38647,8 +36370,7 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
break
}
v.reset(OpAMD64SHRW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38669,15 +36391,13 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh16Ux8 x y)
@@ -38690,8 +36410,7 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
break
}
v.reset(OpAMD64SHRW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38712,9 +36431,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
}
v.reset(OpAMD64SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
@@ -38722,8 +36439,8 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x16 x y)
@@ -38736,8 +36453,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
break
}
v.reset(OpAMD64SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38758,9 +36474,7 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
}
v.reset(OpAMD64SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
@@ -38768,8 +36482,8 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x32 x y)
@@ -38782,8 +36496,7 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
break
}
v.reset(OpAMD64SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38804,9 +36517,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
}
v.reset(OpAMD64SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
@@ -38814,8 +36525,8 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x64 x y)
@@ -38828,8 +36539,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
break
}
v.reset(OpAMD64SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38850,9 +36560,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
}
v.reset(OpAMD64SARW)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
@@ -38860,8 +36568,8 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh16x8 x y)
@@ -38874,8 +36582,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
break
}
v.reset(OpAMD64SARW)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38896,15 +36603,13 @@ func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux16 x y)
@@ -38917,8 +36622,7 @@ func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38939,15 +36643,13 @@ func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux32 x y)
@@ -38960,8 +36662,7 @@ func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -38982,15 +36683,13 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux64 x y)
@@ -39003,8 +36702,7 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39025,15 +36723,13 @@ func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux8 x y)
@@ -39046,8 +36742,7 @@ func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
break
}
v.reset(OpAMD64SHRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39068,9 +36763,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
}
v.reset(OpAMD64SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
@@ -39078,8 +36771,8 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x16 x y)
@@ -39092,8 +36785,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39114,9 +36806,7 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
}
v.reset(OpAMD64SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
@@ -39124,8 +36814,8 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x32 x y)
@@ -39138,8 +36828,7 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39160,9 +36849,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
}
v.reset(OpAMD64SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
@@ -39170,8 +36857,8 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x64 x y)
@@ -39184,8 +36871,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39206,9 +36892,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
}
v.reset(OpAMD64SARL)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
@@ -39216,8 +36900,8 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x8 x y)
@@ -39230,8 +36914,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
break
}
v.reset(OpAMD64SARL)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39252,15 +36935,13 @@ func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh64Ux16 x y)
@@ -39273,8 +36954,7 @@ func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39295,15 +36975,13 @@ func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh64Ux32 x y)
@@ -39316,8 +36994,7 @@ func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39338,15 +37015,13 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh64Ux64 x y)
@@ -39359,8 +37034,7 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39381,15 +37055,13 @@ func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
}
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh64Ux8 x y)
@@ -39402,8 +37074,7 @@ func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
break
}
v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39424,9 +37095,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
}
v.reset(OpAMD64SARQ)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
@@ -39434,8 +37103,8 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh64x16 x y)
@@ -39448,8 +37117,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39470,9 +37138,7 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
}
v.reset(OpAMD64SARQ)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
@@ -39480,8 +37146,8 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh64x32 x y)
@@ -39494,8 +37160,7 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39516,9 +37181,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
}
v.reset(OpAMD64SARQ)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
@@ -39526,8 +37189,8 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh64x64 x y)
@@ -39540,8 +37203,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39562,9 +37224,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
}
v.reset(OpAMD64SARQ)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
@@ -39572,8 +37232,8 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh64x8 x y)
@@ -39586,8 +37246,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
break
}
v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39608,15 +37267,13 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux16 x y)
@@ -39629,8 +37286,7 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
break
}
v.reset(OpAMD64SHRB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39651,15 +37307,13 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux32 x y)
@@ -39672,8 +37326,7 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
break
}
v.reset(OpAMD64SHRB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39694,15 +37347,13 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux64 x y)
@@ -39715,8 +37366,7 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
break
}
v.reset(OpAMD64SHRB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39737,15 +37387,13 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
}
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh8Ux8 x y)
@@ -39758,8 +37406,7 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
break
}
v.reset(OpAMD64SHRB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39780,9 +37427,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
}
v.reset(OpAMD64SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
@@ -39790,8 +37435,8 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x16 x y)
@@ -39804,8 +37449,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
break
}
v.reset(OpAMD64SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39826,9 +37470,7 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
}
v.reset(OpAMD64SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
@@ -39836,8 +37478,8 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x32 x y)
@@ -39850,8 +37492,7 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
break
}
v.reset(OpAMD64SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39872,9 +37513,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
}
v.reset(OpAMD64SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
@@ -39882,8 +37521,8 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x64 x y)
@@ -39896,8 +37535,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
break
}
v.reset(OpAMD64SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39918,9 +37556,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
}
v.reset(OpAMD64SARB)
v.Type = t
- v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
@@ -39928,8 +37564,8 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh8x8 x y)
@@ -39942,8 +37578,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
break
}
v.reset(OpAMD64SARB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -39963,8 +37598,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool {
v.reset(OpSelect0)
v.Type = typ.UInt64
v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -39979,8 +37613,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool {
v.reset(OpSelect0)
v.Type = typ.UInt32
v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -39996,13 +37629,11 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool {
v.reset(OpSelect0)
v.Type = typ.UInt64
v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
- v0.AddArg(x)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
v2.AddArg(c)
v1.AddArg(v2)
- v0.AddArg(v1)
+ v0.AddArg3(x, y, v1)
v.AddArg(v0)
return true
}
@@ -40018,13 +37649,11 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool {
v.reset(OpSelect0)
v.Type = typ.UInt64
v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
- v0.AddArg(x)
- v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
v2.AddArg(c)
v1.AddArg(v2)
- v0.AddArg(v1)
+ v0.AddArg3(x, y, v1)
v.AddArg(v0)
return true
}
@@ -40038,10 +37667,9 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool {
tuple := v_0.Args[1]
val := v_0.Args[0]
v.reset(OpAMD64ADDL)
- v.AddArg(val)
v0 := b.NewValue0(v.Pos, OpSelect0, t)
v0.AddArg(tuple)
- v.AddArg(v0)
+ v.AddArg2(val, v0)
return true
}
// match: (Select0 (AddTupleFirst64 val tuple))
@@ -40054,10 +37682,9 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool {
tuple := v_0.Args[1]
val := v_0.Args[0]
v.reset(OpAMD64ADDQ)
- v.AddArg(val)
v0 := b.NewValue0(v.Pos, OpSelect0, t)
v0.AddArg(tuple)
- v.AddArg(v0)
+ v.AddArg2(val, v0)
return true
}
return false
@@ -40077,8 +37704,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool {
v.reset(OpAMD64SETO)
v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -40094,8 +37720,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool {
v.reset(OpAMD64SETO)
v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -40114,13 +37739,11 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
- v2.AddArg(x)
- v2.AddArg(y)
v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
v4.AddArg(c)
v3.AddArg(v4)
- v2.AddArg(v3)
+ v2.AddArg3(x, y, v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
@@ -40140,13 +37763,11 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
- v2.AddArg(x)
- v2.AddArg(y)
v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
v4.AddArg(c)
v3.AddArg(v4)
- v2.AddArg(v3)
+ v2.AddArg3(x, y, v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
@@ -40180,9 +37801,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool {
break
}
x := v_0_0_0.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (Select1 (AddTupleFirst32 _ tuple))
@@ -40241,9 +37860,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool {
break
}
v.reset(OpAMD64MOVSDstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -40258,9 +37875,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool {
break
}
v.reset(OpAMD64MOVSSstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -40275,9 +37890,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool {
break
}
v.reset(OpAMD64MOVQstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -40292,9 +37905,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool {
break
}
v.reset(OpAMD64MOVLstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -40309,9 +37920,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool {
break
}
v.reset(OpAMD64MOVWstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
@@ -40326,9 +37935,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool {
break
}
v.reset(OpAMD64MOVBstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -40358,9 +37965,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
break
}
mem := v_1
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
+ v.copyOf(mem)
return true
}
// match: (Zero [1] destptr mem)
@@ -40373,8 +37978,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [2] destptr mem)
@@ -40387,8 +37991,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [4] destptr mem)
@@ -40401,8 +38004,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [8] destptr mem)
@@ -40415,8 +38017,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = 0
- v.AddArg(destptr)
- v.AddArg(mem)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [3] destptr mem)
@@ -40429,12 +38030,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [5] destptr mem)
@@ -40447,12 +38046,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [6] destptr mem)
@@ -40465,12 +38062,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(0, 4)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [7] destptr mem)
@@ -40483,12 +38078,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
mem := v_1
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 3)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [s] destptr mem)
@@ -40506,12 +38099,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v0.AuxInt = s % 8
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = 0
- v1.AddArg(destptr)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
return true
}
// match: (Zero [16] destptr mem)
@@ -40528,12 +38119,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
}
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 8)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [24] destptr mem)
@@ -40550,16 +38139,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
}
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 16)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 8)
- v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = 0
- v1.AddArg(destptr)
- v1.AddArg(mem)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [32] destptr mem)
@@ -40576,20 +38162,16 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
}
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 24)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 16)
- v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = makeValAndOff(0, 8)
- v1.AddArg(destptr)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v2.AuxInt = 0
- v2.AddArg(destptr)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [s] destptr mem)
@@ -40604,12 +38186,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
}
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, s-8)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = 0
- v0.AddArg(destptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
return true
}
// match: (Zero [s] destptr mem)
@@ -40627,14 +38207,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v0.AuxInt = s % 16
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
- v1.AddArg(destptr)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v2.AuxInt = 0
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v1.AddArg3(destptr, v2, mem)
+ v.AddArg2(v0, v1)
return true
}
// match: (Zero [s] destptr mem)
@@ -40652,12 +38229,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v0.AuxInt = s % 16
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = 0
- v1.AddArg(destptr)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
return true
}
// match: (Zero [16] destptr mem)
@@ -40673,11 +38248,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
break
}
v.reset(OpAMD64MOVOstore)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(destptr, v0, mem)
return true
}
// match: (Zero [32] destptr mem)
@@ -40696,17 +38269,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v0.AuxInt = 16
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v1.AuxInt = 0
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
- v2.AddArg(destptr)
v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v3.AuxInt = 0
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
+ v2.AddArg3(destptr, v3, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Zero [48] destptr mem)
@@ -40725,26 +38294,20 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v0.AuxInt = 32
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v1.AuxInt = 0
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v3.AuxInt = 16
v3.AddArg(destptr)
- v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v4.AuxInt = 0
- v2.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
- v5.AddArg(destptr)
v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v6.AuxInt = 0
- v5.AddArg(v6)
- v5.AddArg(mem)
- v2.AddArg(v5)
- v.AddArg(v2)
+ v5.AddArg3(destptr, v6, mem)
+ v2.AddArg3(v3, v4, v5)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Zero [64] destptr mem)
@@ -40763,35 +38326,27 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v0.AuxInt = 48
v0.AddArg(destptr)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v1.AuxInt = 0
- v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v3.AuxInt = 32
v3.AddArg(destptr)
- v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v4.AuxInt = 0
- v2.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
v6.AuxInt = 16
v6.AddArg(destptr)
- v5.AddArg(v6)
v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v7.AuxInt = 0
- v5.AddArg(v7)
v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
- v8.AddArg(destptr)
v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v9.AuxInt = 0
- v8.AddArg(v9)
- v8.AddArg(mem)
- v5.AddArg(v8)
- v2.AddArg(v5)
- v.AddArg(v2)
+ v8.AddArg3(destptr, v9, mem)
+ v5.AddArg3(v6, v7, v8)
+ v2.AddArg3(v3, v4, v5)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Zero [s] destptr mem)
@@ -40806,11 +38361,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
}
v.reset(OpAMD64DUFFZERO)
v.AuxInt = s
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.AddArg3(destptr, v0, mem)
return true
}
// match: (Zero [s] destptr mem)
@@ -40824,14 +38377,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
break
}
v.reset(OpAMD64REPSTOSQ)
- v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = s / 8
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v1.AuxInt = 0
- v.AddArg(v1)
- v.AddArg(mem)
+ v.AddArg4(destptr, v0, v1, mem)
return true
}
return false
@@ -40856,11 +38406,9 @@ func rewriteBlockAMD64(b *Block) bool {
continue
}
y := v_0_1
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- b.AddControl(v0)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -40882,11 +38430,9 @@ func rewriteBlockAMD64(b *Block) bool {
continue
}
y := v_0_1
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- b.AddControl(v0)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -40901,11 +38447,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(isUint32PowerOfTwo(c)) {
break
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2uint32(c)
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
// match: (EQ (TESTQconst [c] x))
@@ -40918,11 +38463,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(isUint64PowerOfTwo(c)) {
break
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
// match: (EQ (TESTQ (MOVQconst [c]) x))
@@ -40942,11 +38486,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(isUint64PowerOfTwo(c)) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -40973,11 +38516,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -41004,11 +38546,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -41035,11 +38576,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -41066,11 +38606,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -41093,11 +38632,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -41120,11 +38658,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64UGE)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64UGE, v0)
return true
}
break
@@ -41134,8 +38671,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64EQ)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64EQ, cmp)
return true
}
// match: (EQ (FlagEQ) yes no)
@@ -41178,8 +38714,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64LE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64LE, cmp)
return true
}
// match: (GE (FlagEQ) yes no)
@@ -41220,8 +38755,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64LT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64LT, cmp)
return true
}
// match: (GT (FlagEQ) yes no)
@@ -41263,8 +38797,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETL {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64LT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64LT, cmp)
return true
}
// match: (If (SETLE cmp) yes no)
@@ -41272,8 +38805,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETLE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64LE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64LE, cmp)
return true
}
// match: (If (SETG cmp) yes no)
@@ -41281,8 +38813,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETG {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64GT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64GT, cmp)
return true
}
// match: (If (SETGE cmp) yes no)
@@ -41290,8 +38821,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETGE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64GE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64GE, cmp)
return true
}
// match: (If (SETEQ cmp) yes no)
@@ -41299,8 +38829,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETEQ {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64EQ)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64EQ, cmp)
return true
}
// match: (If (SETNE cmp) yes no)
@@ -41308,8 +38837,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETNE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64NE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64NE, cmp)
return true
}
// match: (If (SETB cmp) yes no)
@@ -41317,8 +38845,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETB {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64ULT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64ULT, cmp)
return true
}
// match: (If (SETBE cmp) yes no)
@@ -41326,8 +38853,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETBE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64ULE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64ULE, cmp)
return true
}
// match: (If (SETA cmp) yes no)
@@ -41335,8 +38861,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETA {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64UGT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGT, cmp)
return true
}
// match: (If (SETAE cmp) yes no)
@@ -41344,8 +38869,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETAE {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64UGE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGE, cmp)
return true
}
// match: (If (SETO cmp) yes no)
@@ -41353,8 +38877,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETO {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64OS)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64OS, cmp)
return true
}
// match: (If (SETGF cmp) yes no)
@@ -41362,8 +38885,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETGF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64UGT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGT, cmp)
return true
}
// match: (If (SETGEF cmp) yes no)
@@ -41371,8 +38893,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETGEF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64UGE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGE, cmp)
return true
}
// match: (If (SETEQF cmp) yes no)
@@ -41380,8 +38901,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETEQF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64EQF)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64EQF, cmp)
return true
}
// match: (If (SETNEF cmp) yes no)
@@ -41389,19 +38909,16 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64SETNEF {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64NEF)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64NEF, cmp)
return true
}
// match: (If cond yes no)
// result: (NE (TESTB cond cond) yes no)
for {
cond := b.Controls[0]
- b.Reset(BlockAMD64NE)
v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
- v0.AddArg(cond)
- v0.AddArg(cond)
- b.AddControl(v0)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(BlockAMD64NE, v0)
return true
}
case BlockAMD64LE:
@@ -41410,8 +38927,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64GE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64GE, cmp)
return true
}
// match: (LE (FlagEQ) yes no)
@@ -41452,8 +38968,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64GT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64GT, cmp)
return true
}
// match: (LT (FlagEQ) yes no)
@@ -41504,8 +39019,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64LT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64LT, cmp)
return true
}
// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
@@ -41522,8 +39036,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64LE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64LE, cmp)
return true
}
// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
@@ -41540,8 +39053,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64GT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64GT, cmp)
return true
}
// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
@@ -41558,8 +39070,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64GE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64GE, cmp)
return true
}
// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
@@ -41576,8 +39087,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64EQ)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64EQ, cmp)
return true
}
// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
@@ -41594,8 +39104,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64NE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64NE, cmp)
return true
}
// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
@@ -41612,8 +39121,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64ULT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64ULT, cmp)
return true
}
// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
@@ -41630,8 +39138,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64ULE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64ULE, cmp)
return true
}
// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
@@ -41648,8 +39155,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64UGT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGT, cmp)
return true
}
// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
@@ -41666,8 +39172,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64UGE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGE, cmp)
return true
}
// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
@@ -41684,8 +39189,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64OS)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64OS, cmp)
return true
}
// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
@@ -41705,11 +39209,9 @@ func rewriteBlockAMD64(b *Block) bool {
continue
}
y := v_0_1
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- b.AddControl(v0)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41731,11 +39233,9 @@ func rewriteBlockAMD64(b *Block) bool {
continue
}
y := v_0_1
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- b.AddControl(v0)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41750,11 +39250,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(isUint32PowerOfTwo(c)) {
break
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2uint32(c)
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
// match: (NE (TESTQconst [c] x))
@@ -41767,11 +39266,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(isUint64PowerOfTwo(c)) {
break
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
// match: (NE (TESTQ (MOVQconst [c]) x))
@@ -41791,11 +39289,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(isUint64PowerOfTwo(c)) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41822,11 +39319,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41853,11 +39349,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41884,11 +39379,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41915,11 +39409,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41942,11 +39435,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = 63
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41969,11 +39461,10 @@ func rewriteBlockAMD64(b *Block) bool {
if !(z1 == z2) {
continue
}
- b.Reset(BlockAMD64ULT)
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = 31
v0.AddArg(x)
- b.AddControl(v0)
+ b.resetWithControl(BlockAMD64ULT, v0)
return true
}
break
@@ -41992,8 +39483,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64UGT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGT, cmp)
return true
}
// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
@@ -42010,8 +39500,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64UGE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGE, cmp)
return true
}
// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
@@ -42028,8 +39517,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64EQF)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64EQF, cmp)
return true
}
// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
@@ -42046,8 +39534,7 @@ func rewriteBlockAMD64(b *Block) bool {
if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
break
}
- b.Reset(BlockAMD64NEF)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64NEF, cmp)
return true
}
// match: (NE (InvertFlags cmp) yes no)
@@ -42055,8 +39542,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64NE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64NE, cmp)
return true
}
// match: (NE (FlagEQ) yes no)
@@ -42140,8 +39626,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64ULE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64ULE, cmp)
return true
}
// match: (UGE (FlagEQ) yes no)
@@ -42182,8 +39667,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64ULT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64ULT, cmp)
return true
}
// match: (UGT (FlagEQ) yes no)
@@ -42225,8 +39709,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64UGE)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGE, cmp)
return true
}
// match: (ULE (FlagEQ) yes no)
@@ -42315,8 +39798,7 @@ func rewriteBlockAMD64(b *Block) bool {
for b.Controls[0].Op == OpAMD64InvertFlags {
v_0 := b.Controls[0]
cmp := v_0.Args[0]
- b.Reset(BlockAMD64UGT)
- b.AddControl(cmp)
+ b.resetWithControl(BlockAMD64UGT, cmp)
return true
}
// match: (ULT (FlagEQ) yes no)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
index 69df3f7a1e..40a7013744 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
@@ -41,8 +41,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -65,10 +64,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
@@ -89,8 +86,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -113,10 +109,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
@@ -137,8 +131,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -161,10 +154,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
@@ -185,8 +176,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@@ -209,10 +199,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = off
v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(x)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
return true
}
}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index baa3c66e0f..91ef5fe14f 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -545,6 +545,9 @@ func rewriteValueARM(v *Value) bool {
case OpCvt64Fto32U:
v.Op = OpARMMOVDWU
return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
case OpDiv16:
return rewriteValueARM_OpDiv16(v)
case OpDiv16u:
@@ -915,8 +918,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
flags := v_2
v.reset(OpARMADCconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
break
@@ -934,9 +936,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
flags := v_2
v.reset(OpARMADCshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
break
@@ -954,9 +954,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
flags := v_2
v.reset(OpARMADCshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
break
@@ -974,9 +972,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
flags := v_2
v.reset(OpARMADCshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
break
@@ -993,10 +989,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
y := v_1.Args[0]
flags := v_2
v.reset(OpARMADCshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
break
@@ -1013,10 +1006,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
y := v_1.Args[0]
flags := v_2
v.reset(OpARMADCshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
break
@@ -1033,10 +1023,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool {
y := v_1.Args[0]
flags := v_2
v.reset(OpARMADCshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
break
@@ -1058,8 +1045,7 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool {
flags := v_1
v.reset(OpARMADCconst)
v.AuxInt = int64(int32(c + d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
// match: (ADCconst [c] (SUBconst [d] x) flags)
@@ -1074,8 +1060,7 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool {
flags := v_1
v.reset(OpARMADCconst)
v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -1100,8 +1085,7 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
@@ -1116,8 +1100,7 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool {
flags := v_2
v.reset(OpARMADCconst)
v.AuxInt = int64(int32(uint32(c) << uint64(d)))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -1141,10 +1124,8 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
@@ -1159,9 +1140,7 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
flags := v_3
v.reset(OpARMADCshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -1186,8 +1165,7 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
@@ -1202,8 +1180,7 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value) bool {
flags := v_2
v.reset(OpARMADCconst)
v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -1227,10 +1204,8 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
@@ -1245,9 +1220,7 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
flags := v_3
v.reset(OpARMADCshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -1272,8 +1245,7 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
@@ -1288,8 +1260,7 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value) bool {
flags := v_2
v.reset(OpARMADCconst)
v.AuxInt = int64(int32(uint32(c) >> uint64(d)))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -1313,10 +1284,8 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
@@ -1331,9 +1300,7 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
flags := v_3
v.reset(OpARMADCshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -1370,8 +1337,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMADDshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1388,8 +1354,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMADDshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1406,8 +1371,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMADDshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1423,9 +1387,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMADDshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -1441,9 +1403,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMADDshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -1459,9 +1419,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMADDshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -1476,8 +1434,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1500,8 +1457,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
v.reset(OpARMRSBconst)
v.AuxInt = c + d
v0 := b.NewValue0(v.Pos, OpARMADD, t)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -1518,9 +1474,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
x := v_0.Args[0]
a := v_1
v.reset(OpARMMULA)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(a)
+ v.AddArg3(x, y, a)
return true
}
break
@@ -1545,9 +1499,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool {
continue
}
v.reset(OpARMMULAD)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
break
@@ -1567,9 +1519,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool {
continue
}
v.reset(OpARMMULSD)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
break
@@ -1594,9 +1544,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool {
continue
}
v.reset(OpARMMULAF)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
break
@@ -1616,9 +1564,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool {
continue
}
v.reset(OpARMMULSF)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
break
@@ -1656,8 +1602,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMADDSshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1674,8 +1619,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMADDSshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1692,8 +1636,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMADDSshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -1709,9 +1652,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMADDSshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -1727,9 +1668,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMADDSshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -1745,9 +1684,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMADDSshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -1808,8 +1745,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -1824,8 +1760,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMADDSshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -1884,8 +1819,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -1900,8 +1834,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMADDSshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -1960,8 +1893,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -1976,8 +1908,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMADDSshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -2007,9 +1938,7 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ADDconst [c] x)
@@ -2200,8 +2129,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2216,8 +2144,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMADDshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -2276,8 +2203,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2292,8 +2218,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMADDshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -2368,8 +2293,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2384,8 +2308,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMADDshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -2421,8 +2344,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMANDshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2439,8 +2361,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMANDshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2457,8 +2378,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMANDshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2474,9 +2394,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMANDshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -2492,9 +2410,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMANDshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -2510,9 +2426,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMANDshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -2524,9 +2438,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (AND x (MVN y))
@@ -2539,8 +2451,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpARMBIC)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2557,8 +2468,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2575,8 +2485,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2593,8 +2502,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -2622,9 +2530,7 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ANDconst [c] x)
@@ -2732,9 +2638,7 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value) bool {
if x != y.Args[0] || !(c == d) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -2756,8 +2660,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2772,8 +2675,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMANDshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -2827,9 +2729,7 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value) bool {
if x != y.Args[0] || !(c == d) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -2851,8 +2751,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2867,8 +2766,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMANDshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -2922,9 +2820,7 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value) bool {
if x != y.Args[0] || !(c == d) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -2946,8 +2842,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2962,8 +2857,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMANDshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3027,8 +2921,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (BIC x (SRLconst [c] y))
@@ -3042,8 +2935,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (BIC x (SRAconst [c] y))
@@ -3057,8 +2949,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (BIC x (SLL y z))
@@ -3071,9 +2962,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMBICshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (BIC x (SRL y z))
@@ -3086,9 +2975,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMBICshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (BIC x (SRA y z))
@@ -3101,9 +2988,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMBICshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (BIC x x)
@@ -3128,9 +3013,7 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (BICconst [c] _)
@@ -3252,8 +3135,7 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMBICshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3309,8 +3191,7 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMBICshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3366,8 +3247,7 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMBICshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3403,8 +3283,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMCMNshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3421,8 +3300,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMCMNshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3439,8 +3317,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMCMNshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3456,9 +3333,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMCMNshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -3474,9 +3349,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMCMNshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -3492,9 +3365,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMCMNshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -3509,8 +3380,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpARMCMP)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -3650,8 +3520,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
v.reset(OpARMCMNconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -3666,8 +3535,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMCMNshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3726,8 +3594,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
v.reset(OpARMCMNconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -3742,8 +3609,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMCMNshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3802,8 +3668,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
v.reset(OpARMCMNconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -3818,8 +3683,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMCMNshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -3845,9 +3709,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
if v_1.Op != OpARMFlagLT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWHSconst _ (FlagLT_UGT) [c])
@@ -3868,9 +3730,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
if v_1.Op != OpARMFlagGT_ULT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWHSconst _ (FlagGT_UGT) [c])
@@ -3895,8 +3755,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
flags := v_1.Args[0]
v.reset(OpARMCMOVWLSconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -3933,9 +3792,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
if v_1.Op != OpARMFlagLT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLSconst _ (FlagGT_ULT) [c])
@@ -3956,9 +3813,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
if v_1.Op != OpARMFlagGT_UGT {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (CMOVWLSconst x (InvertFlags flags) [c])
@@ -3972,8 +3827,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
flags := v_1.Args[0]
v.reset(OpARMCMOVWHSconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -4021,8 +3875,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
}
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -4037,8 +3890,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMCMPshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMP (SLLconst [c] y) x)
@@ -4053,8 +3905,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags)
v0.AuxInt = c
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -4069,8 +3920,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMCMPshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMP (SRLconst [c] y) x)
@@ -4085,8 +3935,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags)
v0.AuxInt = c
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -4101,8 +3950,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMCMPshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (CMP (SRAconst [c] y) x)
@@ -4117,8 +3965,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags)
v0.AuxInt = c
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -4132,9 +3979,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMCMPshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (CMP (SLL y z) x)
@@ -4148,9 +3993,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
x := v_1
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v0.AddArg(z)
+ v0.AddArg3(x, y, z)
v.AddArg(v0)
return true
}
@@ -4164,9 +4007,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMCMPshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (CMP (SRL y z) x)
@@ -4180,9 +4021,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
x := v_1
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v0.AddArg(z)
+ v0.AddArg3(x, y, z)
v.AddArg(v0)
return true
}
@@ -4196,9 +4035,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMCMPshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (CMP (SRA y z) x)
@@ -4212,9 +4049,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
x := v_1
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v0.AddArg(z)
+ v0.AddArg3(x, y, z)
v.AddArg(v0)
return true
}
@@ -4227,8 +4062,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
}
y := v_1.Args[0]
v.reset(OpARMCMN)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -4453,8 +4287,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4470,8 +4303,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMCMPshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -4533,8 +4365,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4550,8 +4381,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMCMPshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -4613,8 +4443,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4630,8 +4459,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMCMPshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -5238,8 +5066,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
v.reset(OpARMMOVBUload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -5256,8 +5083,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
v.reset(OpARMMOVBUload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -5279,8 +5105,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
v.reset(OpARMMOVBUload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
@@ -5295,9 +5120,8 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -5323,9 +5147,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
break
}
v.reset(OpARMMOVBUloadidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBUload [off] {sym} (SB) _)
@@ -5356,13 +5178,9 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool {
if v_2.Op != OpARMMOVBstoreidx {
break
}
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
break
}
v.reset(OpARMMOVBUreg)
@@ -5380,8 +5198,7 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVBUload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBUloadidx (MOVWconst [c]) ptr mem)
@@ -5395,8 +5212,7 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVBUload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -5410,7 +5226,6 @@ func rewriteValueARM_OpARMMOVBUreg(v *Value) bool {
if x.Op != OpARMMOVBUload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -5469,8 +5284,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
v.reset(OpARMMOVBload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -5487,8 +5301,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
v.reset(OpARMMOVBload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -5510,8 +5323,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
v.reset(OpARMMOVBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
@@ -5526,9 +5338,8 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -5554,9 +5365,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
break
}
v.reset(OpARMMOVBloadidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -5574,13 +5383,9 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
if v_2.Op != OpARMMOVBstoreidx {
break
}
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
break
}
v.reset(OpARMMOVBreg)
@@ -5598,8 +5403,7 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVBload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVBloadidx (MOVWconst [c]) ptr mem)
@@ -5613,8 +5417,7 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVBload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -5628,7 +5431,6 @@ func rewriteValueARM_OpARMMOVBreg(v *Value) bool {
if x.Op != OpARMMOVBload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -5693,9 +5495,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem)
@@ -5713,9 +5513,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
@@ -5738,9 +5536,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
@@ -5757,9 +5553,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
@@ -5776,9 +5570,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
@@ -5795,9 +5587,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
@@ -5814,9 +5604,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [0] {sym} (ADD ptr idx) val mem)
@@ -5838,10 +5626,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
break
}
v.reset(OpARMMOVBstoreidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -5863,9 +5648,7 @@ func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVBstore)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstoreidx (MOVWconst [c]) ptr val mem)
@@ -5880,9 +5663,7 @@ func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVBstore)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -5904,8 +5685,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool {
v.reset(OpARMMOVDload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -5922,8 +5702,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool {
v.reset(OpARMMOVDload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -5945,8 +5724,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool {
v.reset(OpARMMOVDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
@@ -5961,15 +5739,12 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -5993,9 +5768,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
v.reset(OpARMMOVDstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem)
@@ -6013,9 +5786,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
v.reset(OpARMMOVDstore)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
@@ -6038,9 +5809,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
v.reset(OpARMMOVDstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -6062,8 +5831,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool {
v.reset(OpARMMOVFload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -6080,8 +5848,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool {
v.reset(OpARMMOVFload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -6103,8 +5870,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool {
v.reset(OpARMMOVFload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
@@ -6119,15 +5885,12 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -6151,9 +5914,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
v.reset(OpARMMOVFstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem)
@@ -6171,9 +5932,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
v.reset(OpARMMOVFstore)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
@@ -6196,9 +5955,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
v.reset(OpARMMOVFstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -6222,8 +5979,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
v.reset(OpARMMOVHUload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -6240,8 +5996,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
v.reset(OpARMMOVHUload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -6263,8 +6018,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
v.reset(OpARMMOVHUload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
@@ -6279,9 +6033,8 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -6307,9 +6060,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
break
}
v.reset(OpARMMOVHUloadidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVHUload [off] {sym} (SB) _)
@@ -6340,13 +6091,9 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool {
if v_2.Op != OpARMMOVHstoreidx {
break
}
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
break
}
v.reset(OpARMMOVHUreg)
@@ -6364,8 +6111,7 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVHUload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHUloadidx (MOVWconst [c]) ptr mem)
@@ -6379,8 +6125,7 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVHUload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -6394,7 +6139,6 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value) bool {
if x.Op != OpARMMOVBUload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -6406,7 +6150,6 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value) bool {
if x.Op != OpARMMOVHUload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -6476,8 +6219,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
v.reset(OpARMMOVHload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -6494,8 +6236,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
v.reset(OpARMMOVHload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -6517,8 +6258,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
v.reset(OpARMMOVHload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
@@ -6533,9 +6273,8 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
@@ -6561,9 +6300,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
break
}
v.reset(OpARMMOVHloadidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -6581,13 +6318,9 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
if v_2.Op != OpARMMOVHstoreidx {
break
}
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
break
}
v.reset(OpARMMOVHreg)
@@ -6605,8 +6338,7 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVHload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVHloadidx (MOVWconst [c]) ptr mem)
@@ -6620,8 +6352,7 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVHload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -6635,7 +6366,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
if x.Op != OpARMMOVBload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -6647,7 +6377,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
if x.Op != OpARMMOVBUload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -6659,7 +6388,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
if x.Op != OpARMMOVHload {
break
}
- _ = x.Args[1]
v.reset(OpARMMOVWreg)
v.AddArg(x)
return true
@@ -6746,9 +6474,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
v.reset(OpARMMOVHstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem)
@@ -6766,9 +6492,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
v.reset(OpARMMOVHstore)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
@@ -6791,9 +6515,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
v.reset(OpARMMOVHstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
@@ -6810,9 +6532,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
v.reset(OpARMMOVHstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
@@ -6829,9 +6549,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
v.reset(OpARMMOVHstore)
v.AuxInt = off
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [0] {sym} (ADD ptr idx) val mem)
@@ -6853,10 +6571,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
break
}
v.reset(OpARMMOVHstoreidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -6878,9 +6593,7 @@ func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVHstore)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstoreidx (MOVWconst [c]) ptr val mem)
@@ -6895,9 +6608,7 @@ func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVHstore)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -6921,8 +6632,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
v.reset(OpARMMOVWload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem)
@@ -6939,8 +6649,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
v.reset(OpARMMOVWload)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
@@ -6962,8 +6671,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
v.reset(OpARMMOVWload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
@@ -6978,15 +6686,12 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVWload [0] {sym} (ADD ptr idx) mem)
@@ -7007,9 +6712,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
break
}
v.reset(OpARMMOVWloadidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
@@ -7032,9 +6735,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
v.reset(OpARMMOVWloadshiftLL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
@@ -7057,9 +6758,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
v.reset(OpARMMOVWloadshiftRL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
@@ -7082,9 +6781,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
v.reset(OpARMMOVWloadshiftRA)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [off] {sym} (SB) _)
@@ -7115,18 +6812,12 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
if v_2.Op != OpARMMOVWstoreidx {
break
}
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVWloadidx ptr (MOVWconst [c]) mem)
@@ -7140,8 +6831,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWloadidx (MOVWconst [c]) ptr mem)
@@ -7155,8 +6845,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWload)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
@@ -7171,9 +6860,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWloadshiftLL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
@@ -7188,9 +6875,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWloadshiftLL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
@@ -7205,9 +6890,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWloadshiftRL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
@@ -7222,9 +6905,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWloadshiftRL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
@@ -7239,9 +6920,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWloadshiftRA)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
@@ -7256,9 +6935,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWloadshiftRA)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
+ v.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -7278,18 +6955,12 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
break
}
d := v_2.AuxInt
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(c == d && isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
@@ -7304,8 +6975,7 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) << uint64(d))
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -7325,18 +6995,12 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool {
break
}
d := v_2.AuxInt
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(c == d && isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
@@ -7351,8 +7015,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWload)
v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -7372,18 +7035,12 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
break
}
d := v_2.AuxInt
- _ = v_2.Args[3]
- ptr2 := v_2.Args[0]
- if idx != v_2.Args[1] {
- break
- }
x := v_2.Args[2]
- if !(c == d && isSamePtr(ptr, ptr2)) {
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
@@ -7398,8 +7055,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
mem := v_2
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) >> uint64(d))
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
return false
@@ -7450,9 +7106,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
v.reset(OpARMMOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem)
@@ -7470,9 +7124,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
v.reset(OpARMMOVWstore)
v.AuxInt = off1 - off2
v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
@@ -7495,9 +7147,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
v.reset(OpARMMOVWstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
@@ -7519,10 +7169,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
break
}
v.reset(OpARMMOVWstoreidx)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
@@ -7546,10 +7193,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
}
v.reset(OpARMMOVWstoreshiftLL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
@@ -7573,10 +7217,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
}
v.reset(OpARMMOVWstoreshiftRL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
@@ -7600,10 +7241,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
}
v.reset(OpARMMOVWstoreshiftRA)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -7625,9 +7263,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstore)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
@@ -7642,9 +7278,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstore)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
@@ -7660,10 +7294,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstoreshiftLL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
@@ -7679,10 +7310,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstoreshiftLL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
@@ -7698,10 +7326,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstoreshiftRL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
@@ -7717,10 +7342,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstoreshiftRL)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
@@ -7736,10 +7358,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstoreshiftRA)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
// match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
@@ -7755,10 +7374,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstoreshiftRA)
v.AuxInt = c
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg4(ptr, idx, val, mem)
return true
}
return false
@@ -7781,9 +7397,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstore)
v.AuxInt = int64(uint32(c) << uint64(d))
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -7806,9 +7420,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstore)
v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -7831,9 +7443,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
mem := v_3
v.reset(OpARMMOVWstore)
v.AuxInt = int64(uint32(c) >> uint64(d))
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.AddArg3(ptr, val, mem)
return true
}
return false
@@ -7883,9 +7493,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 {
continue
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
break
@@ -7925,8 +7533,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
}
v.reset(OpARMADDshiftLL)
v.AuxInt = log2(c - 1)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
break
@@ -7946,8 +7553,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
}
v.reset(OpARMRSBshiftLL)
v.AuxInt = log2(c + 1)
- v.AddArg(x)
- v.AddArg(x)
+ v.AddArg2(x, x)
return true
}
break
@@ -7969,8 +7575,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = 1
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -7993,8 +7598,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = 2
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -8017,8 +7621,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v0.AuxInt = 3
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -8041,8 +7644,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool {
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = 3
- v0.AddArg(x)
- v0.AddArg(x)
+ v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
@@ -8087,8 +7689,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
break
}
v.reset(OpARMSUB)
- v.AddArg(a)
- v.AddArg(x)
+ v.AddArg2(a, x)
return true
}
// match: (MULA _ (MOVWconst [0]) a)
@@ -8098,9 +7699,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
break
}
a := v_2
- v.reset(OpCopy)
- v.Type = a.Type
- v.AddArg(a)
+ v.copyOf(a)
return true
}
// match: (MULA x (MOVWconst [1]) a)
@@ -8112,8 +7711,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
}
a := v_2
v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(a)
+ v.AddArg2(x, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8133,8 +7731,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8153,10 +7750,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v.reset(OpARMADD)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = log2(c - 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8175,10 +7770,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v.reset(OpARMADD)
v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v0.AuxInt = log2(c + 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8199,11 +7792,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 1
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8224,11 +7815,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 2
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8249,11 +7838,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA x (MOVWconst [c]) a)
@@ -8274,11 +7861,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8295,8 +7880,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
break
}
v.reset(OpARMSUB)
- v.AddArg(a)
- v.AddArg(x)
+ v.AddArg2(a, x)
return true
}
// match: (MULA (MOVWconst [0]) _ a)
@@ -8306,9 +7890,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
break
}
a := v_2
- v.reset(OpCopy)
- v.Type = a.Type
- v.AddArg(a)
+ v.copyOf(a)
return true
}
// match: (MULA (MOVWconst [1]) x a)
@@ -8320,8 +7902,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
x := v_1
a := v_2
v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(a)
+ v.AddArg2(x, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8341,8 +7922,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8361,10 +7941,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v.reset(OpARMADD)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = log2(c - 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8383,10 +7961,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v.reset(OpARMADD)
v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v0.AuxInt = log2(c + 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8407,11 +7983,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 1
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8432,11 +8006,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 2
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8457,11 +8029,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) x a)
@@ -8482,11 +8052,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool {
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
@@ -8525,8 +8093,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool {
continue
}
v.reset(OpARMNMULD)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -8550,8 +8117,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool {
continue
}
v.reset(OpARMNMULF)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -8577,8 +8143,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
break
}
v.reset(OpARMADD)
- v.AddArg(a)
- v.AddArg(x)
+ v.AddArg2(a, x)
return true
}
// match: (MULS _ (MOVWconst [0]) a)
@@ -8588,9 +8153,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
break
}
a := v_2
- v.reset(OpCopy)
- v.Type = a.Type
- v.AddArg(a)
+ v.copyOf(a)
return true
}
// match: (MULS x (MOVWconst [1]) a)
@@ -8602,8 +8165,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
}
a := v_2
v.reset(OpARMRSB)
- v.AddArg(x)
- v.AddArg(a)
+ v.AddArg2(x, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8623,8 +8185,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8643,10 +8204,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v.reset(OpARMRSB)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = log2(c - 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8665,10 +8224,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v.reset(OpARMRSB)
v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v0.AuxInt = log2(c + 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8689,11 +8246,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 1
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8714,11 +8269,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 2
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8739,11 +8292,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS x (MOVWconst [c]) a)
@@ -8764,11 +8315,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8785,8 +8334,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
break
}
v.reset(OpARMADD)
- v.AddArg(a)
- v.AddArg(x)
+ v.AddArg2(a, x)
return true
}
// match: (MULS (MOVWconst [0]) _ a)
@@ -8796,9 +8344,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
break
}
a := v_2
- v.reset(OpCopy)
- v.Type = a.Type
- v.AddArg(a)
+ v.copyOf(a)
return true
}
// match: (MULS (MOVWconst [1]) x a)
@@ -8810,8 +8356,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
x := v_1
a := v_2
v.reset(OpARMRSB)
- v.AddArg(x)
- v.AddArg(a)
+ v.AddArg2(x, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8831,8 +8376,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c)
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8851,10 +8395,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v.reset(OpARMRSB)
v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v0.AuxInt = log2(c - 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8873,10 +8415,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v.reset(OpARMRSB)
v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v0.AuxInt = log2(c + 1)
- v0.AddArg(x)
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(a)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8897,11 +8437,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 1
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8922,11 +8460,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 2
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8947,11 +8483,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) x a)
@@ -8972,11 +8506,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool {
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
v1.AuxInt = 3
- v1.AddArg(x)
- v1.AddArg(x)
+ v1.AddArg2(x, x)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(a)
+ v.AddArg2(v0, a)
return true
}
// match: (MULS (MOVWconst [c]) (MOVWconst [d]) a)
@@ -9059,8 +8591,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpARMMVNshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (MVN (SRL x y))
@@ -9072,8 +8603,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpARMMVNshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (MVN (SRA x y))
@@ -9085,8 +8615,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpARMMVNshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9208,8 +8737,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool {
break
}
v.reset(OpARMNMULD)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9229,8 +8757,7 @@ func rewriteValueARM_OpARMNEGF(v *Value) bool {
break
}
v.reset(OpARMNMULF)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9248,8 +8775,7 @@ func rewriteValueARM_OpARMNMULD(v *Value) bool {
x := v_0.Args[0]
y := v_1
v.reset(OpARMMULD)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9269,8 +8795,7 @@ func rewriteValueARM_OpARMNMULF(v *Value) bool {
x := v_0.Args[0]
y := v_1
v.reset(OpARMMULF)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9373,8 +8898,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMORshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9391,8 +8915,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMORshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9409,8 +8932,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMORshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -9426,9 +8948,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMORshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -9444,9 +8964,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMORshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -9462,9 +8980,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMORshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -9476,9 +8992,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool {
if x != v_1 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
return false
@@ -9492,9 +9006,7 @@ func rewriteValueARM_OpARMORconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (ORconst [c] _)
@@ -9636,9 +9148,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
if x != y.Args[0] || !(c == d) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -9660,8 +9170,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -9676,8 +9185,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMORshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9731,9 +9239,7 @@ func rewriteValueARM_OpARMORshiftRA(v *Value) bool {
if x != y.Args[0] || !(c == d) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -9755,8 +9261,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -9771,8 +9276,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMORshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9842,9 +9346,7 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
if x != y.Args[0] || !(c == d) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.copyOf(y)
return true
}
return false
@@ -9866,8 +9368,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -9882,8 +9383,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMORshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -9928,8 +9428,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMRSBshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RSB (SLLconst [c] y) x)
@@ -9943,8 +9442,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
x := v_1
v.reset(OpARMSUBshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RSB x (SRLconst [c] y))
@@ -9958,8 +9456,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMRSBshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RSB (SRLconst [c] y) x)
@@ -9973,8 +9470,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
x := v_1
v.reset(OpARMSUBshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RSB x (SRAconst [c] y))
@@ -9988,8 +9484,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMRSBshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RSB (SRAconst [c] y) x)
@@ -10003,8 +9498,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
x := v_1
v.reset(OpARMSUBshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (RSB x (SLL y z))
@@ -10017,9 +9511,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMRSBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (RSB (SLL y z) x)
@@ -10032,9 +9524,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMSUBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (RSB x (SRL y z))
@@ -10047,9 +9537,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMRSBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (RSB (SRL y z) x)
@@ -10062,9 +9550,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMSUBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (RSB x (SRA y z))
@@ -10077,9 +9563,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMRSBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (RSB (SRA y z) x)
@@ -10092,9 +9576,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMSUBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (RSB x x)
@@ -10122,9 +9604,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
break
}
v.reset(OpARMMULS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(a)
+ v.AddArg3(x, y, a)
return true
}
return false
@@ -10183,8 +9663,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -10199,8 +9678,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMRSBSshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10259,8 +9737,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -10275,8 +9752,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMRSBSshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10335,8 +9811,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -10351,8 +9826,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMRSBSshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10486,8 +9960,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -10502,8 +9975,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMRSBshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10579,8 +10051,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -10595,8 +10066,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMRSBshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10672,8 +10142,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -10688,8 +10157,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMRSBshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -10709,8 +10177,7 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool {
flags := v_1
v.reset(OpARMRSCconst)
v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
// match: (RSCconst [c] (SUBconst [d] x) flags)
@@ -10725,8 +10192,7 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool {
flags := v_1
v.reset(OpARMRSCconst)
v.AuxInt = int64(int32(c + d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -10751,8 +10217,7 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
@@ -10767,8 +10232,7 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool {
flags := v_2
v.reset(OpARMRSCconst)
v.AuxInt = int64(int32(uint32(c) << uint64(d)))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -10792,10 +10256,8 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
@@ -10810,9 +10272,7 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
flags := v_3
v.reset(OpARMRSCshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -10837,8 +10297,7 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
@@ -10853,8 +10312,7 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool {
flags := v_2
v.reset(OpARMRSCconst)
v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -10878,10 +10336,8 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
@@ -10896,9 +10352,7 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
flags := v_3
v.reset(OpARMRSCshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -10923,8 +10377,7 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
@@ -10939,8 +10392,7 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool {
flags := v_2
v.reset(OpARMRSCconst)
v.AuxInt = int64(int32(uint32(c) >> uint64(d)))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -10964,10 +10416,8 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
@@ -10982,9 +10432,7 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
flags := v_3
v.reset(OpARMRSCshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -11004,8 +10452,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMRSCconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
// match: (SBC x (MOVWconst [c]) flags)
@@ -11019,8 +10466,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMSBCconst)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
// match: (SBC x (SLLconst [c] y) flags)
@@ -11035,9 +10481,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMSBCshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
// match: (SBC (SLLconst [c] y) x flags)
@@ -11052,9 +10496,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMRSCshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
// match: (SBC x (SRLconst [c] y) flags)
@@ -11069,9 +10511,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMSBCshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
// match: (SBC (SRLconst [c] y) x flags)
@@ -11086,9 +10526,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMRSCshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
// match: (SBC x (SRAconst [c] y) flags)
@@ -11103,9 +10541,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMSBCshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
// match: (SBC (SRAconst [c] y) x flags)
@@ -11120,9 +10556,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
flags := v_2
v.reset(OpARMRSCshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
// match: (SBC x (SLL y z) flags)
@@ -11136,10 +10570,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
y := v_1.Args[0]
flags := v_2
v.reset(OpARMSBCshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
// match: (SBC (SLL y z) x flags)
@@ -11153,10 +10584,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
x := v_1
flags := v_2
v.reset(OpARMRSCshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
// match: (SBC x (SRL y z) flags)
@@ -11170,10 +10598,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
y := v_1.Args[0]
flags := v_2
v.reset(OpARMSBCshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
// match: (SBC (SRL y z) x flags)
@@ -11187,10 +10612,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
x := v_1
flags := v_2
v.reset(OpARMRSCshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
// match: (SBC x (SRA y z) flags)
@@ -11204,10 +10626,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
y := v_1.Args[0]
flags := v_2
v.reset(OpARMSBCshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
// match: (SBC (SRA y z) x flags)
@@ -11221,10 +10640,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool {
x := v_1
flags := v_2
v.reset(OpARMRSCshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
+ v.AddArg4(x, y, z, flags)
return true
}
return false
@@ -11244,8 +10660,7 @@ func rewriteValueARM_OpARMSBCconst(v *Value) bool {
flags := v_1
v.reset(OpARMSBCconst)
v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
// match: (SBCconst [c] (SUBconst [d] x) flags)
@@ -11260,8 +10675,7 @@ func rewriteValueARM_OpARMSBCconst(v *Value) bool {
flags := v_1
v.reset(OpARMSBCconst)
v.AuxInt = int64(int32(c + d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -11286,8 +10700,7 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
@@ -11302,8 +10715,7 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool {
flags := v_2
v.reset(OpARMSBCconst)
v.AuxInt = int64(int32(uint32(c) << uint64(d)))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -11327,10 +10739,8 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
@@ -11345,9 +10755,7 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
flags := v_3
v.reset(OpARMSBCshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -11372,8 +10780,7 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
@@ -11388,8 +10795,7 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool {
flags := v_2
v.reset(OpARMSBCconst)
v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -11413,10 +10819,8 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
@@ -11431,9 +10835,7 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
flags := v_3
v.reset(OpARMSBCshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -11458,8 +10860,7 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
v0.AuxInt = d
v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(flags)
+ v.AddArg2(v0, flags)
return true
}
// match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
@@ -11474,8 +10875,7 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool {
flags := v_2
v.reset(OpARMSBCconst)
v.AuxInt = int64(int32(uint32(c) >> uint64(d)))
- v.AddArg(x)
- v.AddArg(flags)
+ v.AddArg2(x, flags)
return true
}
return false
@@ -11499,10 +10899,8 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
return true
}
// match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
@@ -11517,9 +10915,7 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
flags := v_3
v.reset(OpARMSBCshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(flags)
+ v.AddArg3(x, y, flags)
return true
}
return false
@@ -11601,8 +10997,7 @@ func rewriteValueARM_OpARMSRAcond(v *Value) bool {
break
}
v.reset(OpARMSRA)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SRAcond x _ (FlagLT_UGT))
@@ -11626,8 +11021,7 @@ func rewriteValueARM_OpARMSRAcond(v *Value) bool {
break
}
v.reset(OpARMSRA)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SRAcond x _ (FlagGT_UGT))
@@ -11770,8 +11164,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMSUBshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUB (SLLconst [c] y) x)
@@ -11785,8 +11178,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
x := v_1
v.reset(OpARMRSBshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUB x (SRLconst [c] y))
@@ -11800,8 +11192,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMSUBshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUB (SRLconst [c] y) x)
@@ -11815,8 +11206,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
x := v_1
v.reset(OpARMRSBshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUB x (SRAconst [c] y))
@@ -11830,8 +11220,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMSUBshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUB (SRAconst [c] y) x)
@@ -11845,8 +11234,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
x := v_1
v.reset(OpARMRSBshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUB x (SLL y z))
@@ -11859,9 +11247,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMSUBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUB (SLL y z) x)
@@ -11874,9 +11260,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMRSBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUB x (SRL y z))
@@ -11889,9 +11273,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMSUBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUB (SRL y z) x)
@@ -11904,9 +11286,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMRSBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUB x (SRA y z))
@@ -11919,9 +11299,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMSUBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUB (SRA y z) x)
@@ -11934,9 +11312,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMRSBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUB x x)
@@ -11964,9 +11340,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
break
}
v.reset(OpARMMULS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(a)
+ v.AddArg3(x, y, a)
return true
}
return false
@@ -11988,9 +11362,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool {
break
}
v.reset(OpARMMULSD)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
// match: (SUBD a (NMULD x y))
@@ -12007,9 +11379,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool {
break
}
v.reset(OpARMMULAD)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
return false
@@ -12031,9 +11401,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool {
break
}
v.reset(OpARMMULSF)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
// match: (SUBF a (NMULF x y))
@@ -12050,9 +11418,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool {
break
}
v.reset(OpARMMULAF)
- v.AddArg(a)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(a, x, y)
return true
}
return false
@@ -12084,8 +11450,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMSUBSshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUBS (SLLconst [c] y) x)
@@ -12099,8 +11464,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
x := v_1
v.reset(OpARMRSBSshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUBS x (SRLconst [c] y))
@@ -12114,8 +11478,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUBS (SRLconst [c] y) x)
@@ -12129,8 +11492,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
x := v_1
v.reset(OpARMRSBSshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUBS x (SRAconst [c] y))
@@ -12144,8 +11506,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUBS (SRAconst [c] y) x)
@@ -12159,8 +11520,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
x := v_1
v.reset(OpARMRSBSshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
// match: (SUBS x (SLL y z))
@@ -12173,9 +11533,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMSUBSshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUBS (SLL y z) x)
@@ -12188,9 +11546,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMRSBSshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUBS x (SRL y z))
@@ -12203,9 +11559,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUBS (SRL y z) x)
@@ -12218,9 +11572,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMRSBSshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUBS x (SRA y z))
@@ -12233,9 +11585,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
// match: (SUBS (SRA y z) x)
@@ -12248,9 +11598,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool {
y := v_0.Args[0]
x := v_1
v.reset(OpARMRSBSshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
return false
@@ -12309,8 +11657,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -12325,8 +11672,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMSUBSshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12385,8 +11731,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -12401,8 +11746,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMSUBSshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12461,8 +11805,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -12477,8 +11820,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMSUBSshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12508,9 +11850,7 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (SUBconst [c] x)
@@ -12668,8 +12008,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -12684,8 +12023,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMSUBshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12761,8 +12099,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -12777,8 +12114,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMSUBshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12854,8 +12190,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -12870,8 +12205,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMSUBshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -12907,8 +12241,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMTEQshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -12925,8 +12258,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMTEQshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -12943,8 +12275,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMTEQshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -12960,9 +12291,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMTEQshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -12978,9 +12307,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMTEQshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -12996,9 +12323,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMTEQshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13108,8 +12433,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
v.reset(OpARMTEQconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13124,8 +12448,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMTEQshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -13184,8 +12507,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
v.reset(OpARMTEQconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13200,8 +12522,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMTEQshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -13260,8 +12581,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
v.reset(OpARMTEQconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13276,8 +12596,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMTEQshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -13313,8 +12632,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMTSTshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13331,8 +12649,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMTSTshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13349,8 +12666,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMTSTshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13366,9 +12682,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMTSTshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13384,9 +12698,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMTSTshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13402,9 +12714,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMTSTshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13514,8 +12824,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
v.reset(OpARMTSTconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13530,8 +12839,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMTSTshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -13590,8 +12898,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
v.reset(OpARMTSTconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13606,8 +12913,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMTSTshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -13666,8 +12972,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
v.reset(OpARMTSTconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -13682,8 +12987,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMTSTshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -13719,8 +13023,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMXORshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13737,8 +13040,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMXORshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13755,8 +13057,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMXORshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13773,8 +13074,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
y := v_1.Args[0]
v.reset(OpARMXORshiftRR)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
break
@@ -13790,9 +13090,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMXORshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13808,9 +13106,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMXORshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13826,9 +13122,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool {
z := v_1.Args[1]
y := v_1.Args[0]
v.reset(OpARMXORshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
+ v.AddArg3(x, y, z)
return true
}
break
@@ -13855,9 +13149,7 @@ func rewriteValueARM_OpARMXORconst(v *Value) bool {
break
}
x := v_0
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.copyOf(x)
return true
}
// match: (XORconst [c] (MOVWconst [d]))
@@ -14009,8 +13301,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14025,8 +13316,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMXORshiftLL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14102,8 +13392,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14118,8 +13407,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMXORshiftRA)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14211,8 +13499,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14227,8 +13514,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
c := v_2.AuxInt
v.reset(OpARMXORshiftRL)
v.AuxInt = c
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg2(x, y)
return true
}
return false
@@ -14284,11 +13570,9 @@ func rewriteValueARM_OpAvg32u(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
v0.AuxInt = 1
v1 := b.NewValue0(v.Pos, OpARMSUB, t)
- v1.AddArg(x)
- v1.AddArg(y)
+ v1.AddArg2(x, y)
v0.AddArg(v1)
- v.AddArg(v0)
- v.AddArg(y)
+ v.AddArg2(v0, y)
return true
}
}
@@ -14327,18 +13611,16 @@ func rewriteValueARM_OpBswap32(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpARMBICconst, t)
v1.AuxInt = 0xff0000
v2 := b.NewValue0(v.Pos, OpARMXOR, t)
- v2.AddArg(x)
v3 := b.NewValue0(v.Pos, OpARMSRRconst, t)
v3.AuxInt = 16
v3.AddArg(x)
- v2.AddArg(v3)
+ v2.AddArg2(x, v3)
v1.AddArg(v2)
v0.AddArg(v1)
- v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpARMSRRconst, t)
v4.AuxInt = 8
v4.AddArg(x)
- v.AddArg(v4)
+ v.AddArg2(v0, v4)
return true
}
// match: (Bswap32 x)
@@ -14386,14 +13668,13 @@ func rewriteValueARM_OpCtz16(v *Value) bool {
v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
v3.AuxInt = 0x10000
v3.AddArg(x)
- v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
v4.AuxInt = 0
v5 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
v5.AuxInt = 0x10000
v5.AddArg(x)
v4.AddArg(v5)
- v2.AddArg(v4)
+ v2.AddArg2(v3, v4)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
@@ -14438,11 +13719,10 @@ func rewriteValueARM_OpCtz32(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpARMSUBconst, t)
v1.AuxInt = 1
v2 := b.NewValue0(v.Pos, OpARMAND, t)
- v2.AddArg(x)
v3 := b.NewValue0(v.Pos, OpARMRSBconst, t)
v3.AuxInt = 0
v3.AddArg(x)
- v2.AddArg(v3)
+ v2.AddArg2(x, v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
@@ -14488,14 +13768,13 @@ func rewriteValueARM_OpCtz8(v *Value) bool {
v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
v3.AuxInt = 0x100
v3.AddArg(x)
- v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
v4.AuxInt = 0
v5 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
v5.AuxInt = 0x100
v5.AddArg(x)
v4.AddArg(v5)
- v2.AddArg(v4)
+ v2.AddArg2(v3, v4)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
@@ -14535,10 +13814,9 @@ func rewriteValueARM_OpDiv16(v *Value) bool {
v.reset(OpDiv32)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -14555,10 +13833,9 @@ func rewriteValueARM_OpDiv16u(v *Value) bool {
v.reset(OpDiv32u)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -14578,41 +13855,32 @@ func rewriteValueARM_OpDiv32(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
- v4.AddArg(x)
v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v5.AddArg(x)
- v4.AddArg(v5)
- v3.AddArg(v4)
+ v4.AddArg2(x, v5)
v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v6.AddArg(x)
- v3.AddArg(v6)
- v2.AddArg(v3)
+ v3.AddArg2(v4, v6)
v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
- v8.AddArg(y)
v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v9.AddArg(y)
- v8.AddArg(v9)
- v7.AddArg(v8)
+ v8.AddArg2(y, v9)
v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v10.AddArg(y)
- v7.AddArg(v10)
- v2.AddArg(v7)
+ v7.AddArg2(v8, v10)
+ v2.AddArg2(v3, v7)
v1.AddArg(v2)
- v0.AddArg(v1)
v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v12 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
- v12.AddArg(x)
- v12.AddArg(y)
+ v12.AddArg2(x, y)
v11.AddArg(v12)
- v0.AddArg(v11)
- v.AddArg(v0)
+ v0.AddArg2(v1, v11)
v13 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v14 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
- v14.AddArg(x)
- v14.AddArg(y)
+ v14.AddArg2(x, y)
v13.AddArg(v14)
- v.AddArg(v13)
+ v.AddArg2(v0, v13)
return true
}
}
@@ -14629,8 +13897,7 @@ func rewriteValueARM_OpDiv32u(v *Value) bool {
v.reset(OpSelect0)
v.Type = typ.UInt32
v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14648,10 +13915,9 @@ func rewriteValueARM_OpDiv8(v *Value) bool {
v.reset(OpDiv32)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -14668,10 +13934,9 @@ func rewriteValueARM_OpDiv8u(v *Value) bool {
v.reset(OpDiv32u)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
- v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
- v.AddArg(v1)
+ v.AddArg2(v0, v1)
return true
}
}
@@ -14689,10 +13954,9 @@ func rewriteValueARM_OpEq16(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -14708,8 +13972,7 @@ func rewriteValueARM_OpEq32(v *Value) bool {
y := v_1
v.reset(OpARMEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14725,8 +13988,7 @@ func rewriteValueARM_OpEq32F(v *Value) bool {
y := v_1
v.reset(OpARMEqual)
v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14742,8 +14004,7 @@ func rewriteValueARM_OpEq64F(v *Value) bool {
y := v_1
v.reset(OpARMEqual)
v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14762,10 +14023,9 @@ func rewriteValueARM_OpEq8(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -14783,8 +14043,7 @@ func rewriteValueARM_OpEqB(v *Value) bool {
v.reset(OpARMXORconst)
v.AuxInt = 1
v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14800,8 +14059,7 @@ func rewriteValueARM_OpEqPtr(v *Value) bool {
y := v_1
v.reset(OpARMEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14817,9 +14075,7 @@ func rewriteValueARM_OpFMA(v *Value) bool {
y := v_1
z := v_2
v.reset(OpARMFMULAD)
- v.AddArg(z)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg3(z, x, y)
return true
}
}
@@ -14834,8 +14090,7 @@ func rewriteValueARM_OpGeq32F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14851,8 +14106,7 @@ func rewriteValueARM_OpGeq64F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14868,8 +14122,7 @@ func rewriteValueARM_OpGreater32F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterThan)
v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14885,8 +14138,7 @@ func rewriteValueARM_OpGreater64F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterThan)
v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -14902,8 +14154,7 @@ func rewriteValueARM_OpIsInBounds(v *Value) bool {
len := v_1
v.reset(OpARMLessThanU)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
+ v0.AddArg2(idx, len)
v.AddArg(v0)
return true
}
@@ -14934,8 +14185,7 @@ func rewriteValueARM_OpIsSliceInBounds(v *Value) bool {
len := v_1
v.reset(OpARMLessEqualU)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
+ v0.AddArg2(idx, len)
v.AddArg(v0)
return true
}
@@ -14954,10 +14204,9 @@ func rewriteValueARM_OpLeq16(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -14976,10 +14225,9 @@ func rewriteValueARM_OpLeq16U(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -14995,8 +14243,7 @@ func rewriteValueARM_OpLeq32(v *Value) bool {
y := v_1
v.reset(OpARMLessEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15012,8 +14259,7 @@ func rewriteValueARM_OpLeq32F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -15029,8 +14275,7 @@ func rewriteValueARM_OpLeq32U(v *Value) bool {
y := v_1
v.reset(OpARMLessEqualU)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15046,8 +14291,7 @@ func rewriteValueARM_OpLeq64F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -15066,10 +14310,9 @@ func rewriteValueARM_OpLeq8(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -15088,10 +14331,9 @@ func rewriteValueARM_OpLeq8U(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -15110,10 +14352,9 @@ func rewriteValueARM_OpLess16(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -15132,10 +14373,9 @@ func rewriteValueARM_OpLess16U(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -15151,8 +14391,7 @@ func rewriteValueARM_OpLess32(v *Value) bool {
y := v_1
v.reset(OpARMLessThan)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15168,8 +14407,7 @@ func rewriteValueARM_OpLess32F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterThan)
v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -15185,8 +14423,7 @@ func rewriteValueARM_OpLess32U(v *Value) bool {
y := v_1
v.reset(OpARMLessThanU)
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -15202,8 +14439,7 @@ func rewriteValueARM_OpLess64F(v *Value) bool {
y := v_1
v.reset(OpARMGreaterThan)
v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v0.AddArg2(y, x)
v.AddArg(v0)
return true
}
@@ -15222,10 +14458,9 @@ func rewriteValueARM_OpLess8(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -15244,10 +14479,9 @@ func rewriteValueARM_OpLess8U(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
- v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
- v0.AddArg(v2)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -15266,8 +14500,7 @@ func rewriteValueARM_OpLoad(v *Value) bool {
break
}
v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -15281,8 +14514,7 @@ func rewriteValueARM_OpLoad(v *Value) bool {
break
}
v.reset(OpARMMOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -15296,8 +14528,7 @@ func rewriteValueARM_OpLoad(v *Value) bool {
break
}
v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load ptr mem)
@@ -15311,8 +14542,7 @@ func rewriteValueARM_OpLoad(v *Value) bool {
break
}
v.reset(OpARMMOVHload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg2(ptr, mem)
return true
}
// match: (Load