diff --git a/doc/articles/race_detector.html b/doc/articles/race_detector.html index 014411d948..63a658f870 100644 --- a/doc/articles/race_detector.html +++ b/doc/articles/race_detector.html @@ -379,6 +379,38 @@ func (w *Watchdog) Start() { } +

Unsynchronized send and close operations

+ +

+As this example demonstrates, unsynchronized send and close operations +on the same channel can also be a race condition: +

+ +
+c := make(chan struct{}) // or buffered channel
+
+// The race detector cannot derive the happens before relation
+// for the following send and close operations. These two operations
+// are unsynchronized and happen concurrently.
+go func() { c <- struct{}{} }()
+close(c)
+
+ +

+According to the Go memory model, a send on a channel happens before +the corresponding receive from that channel completes. To synchronize +send and close operations, use a receive operation that guarantees +the send is done before the close: +

+ +
+c := make(chan struct{}) // or buffered channel
+
+go func() { c <- struct{}{} }()
+<-c
+close(c)
+
+

Supported Systems

diff --git a/doc/go1.14.html b/doc/go1.14.html index 6e7c5dc9b6..eb35fa8cae 100644 --- a/doc/go1.14.html +++ b/doc/go1.14.html @@ -759,6 +759,19 @@ Do not send CLs removing the interior tags from such phrases. +

net/url
+
+

+ When parsing of a URL fails + (for example by Parse + or ParseRequestURI), + the resulting Error message + will now quote the unparsable URL. + This provides clearer structure and consistency with other parsing errors. +

+
+
+
os/signal

diff --git a/doc/go1.15.html b/doc/go1.15.html index 1eb159c318..b4319874c9 100644 --- a/doc/go1.15.html +++ b/doc/go1.15.html @@ -47,6 +47,14 @@ TODO TODO

+

go test

+ +

+ Changing the -timeout flag now invalidates cached test results. A + cached result for a test run with a long timeout will no longer count as + passing when go test is re-invoked with a short one. +

+

Flag parsing

@@ -92,6 +100,18 @@ TODO TODO

+
runtime
+
+

+ If panic is invoked with a value whose type is derived from any + of: bool, complex64, complex128, float32, float64, + int, int8, int16, int32, int64, string, + uint, uint8, uint16, uint32, uint64, uintptr, + then the value will be printed, instead of just its address. +

+
+
+
sync

diff --git a/doc/go_mem.html b/doc/go_mem.html index d355bebaed..5f1eb68af3 100644 --- a/doc/go_mem.html +++ b/doc/go_mem.html @@ -273,9 +273,7 @@ func f() { a = "hello, world" <-c } - -

 func main() {
 	go f()
 	c <- 0
diff --git a/misc/cgo/test/sigaltstack.go b/misc/cgo/test/sigaltstack.go
index 2c9b81ced7..7b3f4acbb7 100644
--- a/misc/cgo/test/sigaltstack.go
+++ b/misc/cgo/test/sigaltstack.go
@@ -14,15 +14,22 @@ package cgotest
 #include 
 #include 
 
+#ifdef _AIX
+// On AIX, SIGSTKSZ is too small to handle Go sighandler.
+#define CSIGSTKSZ 0x4000
+#else
+#define CSIGSTKSZ SIGSTKSZ
+#endif
+
 static stack_t oss;
-static char signalStack[SIGSTKSZ];
+static char signalStack[CSIGSTKSZ];
 
 static void changeSignalStack(void) {
 	stack_t ss;
 	memset(&ss, 0, sizeof ss);
 	ss.ss_sp = signalStack;
 	ss.ss_flags = 0;
-	ss.ss_size = SIGSTKSZ;
+	ss.ss_size = CSIGSTKSZ;
 	if (sigaltstack(&ss, &oss) < 0) {
 		perror("sigaltstack");
 		abort();
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index e872cc2050..e7931387aa 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -117,17 +117,17 @@ func LastIndex(s, sep []byte) int {
 		return -1
 	}
 	// Rabin-Karp search from the end of the string
-	hashss, pow := hashStrRev(sep)
+	hashss, pow := bytealg.HashStrRevBytes(sep)
 	last := len(s) - n
 	var h uint32
 	for i := len(s) - 1; i >= last; i-- {
-		h = h*primeRK + uint32(s[i])
+		h = h*bytealg.PrimeRK + uint32(s[i])
 	}
 	if h == hashss && Equal(s[last:], sep) {
 		return last
 	}
 	for i := last - 1; i >= 0; i-- {
-		h *= primeRK
+		h *= bytealg.PrimeRK
 		h += uint32(s[i])
 		h -= pow * uint32(s[i+n])
 		if h == hashss && Equal(s[i:i+n], sep) {
@@ -1068,7 +1068,7 @@ func Index(s, sep []byte) int {
 			// we should cutover at even larger average skips,
 			// because Equal becomes that much more expensive.
 			// This code does not take that effect into account.
-			j := indexRabinKarp(s[i:], sep)
+			j := bytealg.IndexRabinKarpBytes(s[i:], sep)
 			if j < 0 {
 				return -1
 			}
@@ -1077,63 +1077,3 @@ func Index(s, sep []byte) int {
 	}
 	return -1
 }
-
-func indexRabinKarp(s, sep []byte) int {
-	// Rabin-Karp search
-	hashsep, pow := hashStr(sep)
-	n := len(sep)
-	var h uint32
-	for i := 0; i < n; i++ {
-		h = h*primeRK + uint32(s[i])
-	}
-	if h == hashsep && Equal(s[:n], sep) {
-		return 0
-	}
-	for i := n; i < len(s); {
-		h *= primeRK
-		h += uint32(s[i])
-		h -= pow * uint32(s[i-n])
-		i++
-		if h == hashsep && Equal(s[i-n:i], sep) {
-			return i - n
-		}
-	}
-	return -1
-}
-
-// primeRK is the prime base used in Rabin-Karp algorithm.
-const primeRK = 16777619
-
-// hashStr returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func hashStr(sep []byte) (uint32, uint32) {
-	hash := uint32(0)
-	for i := 0; i < len(sep); i++ {
-		hash = hash*primeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, primeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
-
-// hashStrRev returns the hash of the reverse of sep and the
-// appropriate multiplicative factor for use in Rabin-Karp algorithm.
-func hashStrRev(sep []byte) (uint32, uint32) {
-	hash := uint32(0)
-	for i := len(sep) - 1; i >= 0; i-- {
-		hash = hash*primeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, primeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index 2dbbb99f37..a208d4ed76 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -141,9 +141,10 @@ var indexTests = []BinOpTest{
 	{"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33},
 	{"foofyfoobarfoobar", "y", 4},
 	{"oooooooooooooooooooooo", "r", -1},
-	// test fallback to Rabin-Karp.
 	{"oxoxoxoxoxoxoxoxoxoxoxoy", "oy", 22},
 	{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
+	// test fallback to Rabin-Karp.
+	{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
 }
 
 var lastIndexTests = []BinOpTest{
@@ -209,6 +210,27 @@ func runIndexTests(t *testing.T, f func(s, sep []byte) int, funcName string, tes
 			t.Errorf("%s(%q,%q) = %v; want %v", funcName, a, b, actual, test.i)
 		}
 	}
+	var allocTests = []struct {
+		a []byte
+		b []byte
+		i int
+	}{
+		// case for function Index.
+		{[]byte("000000000000000000000000000000000000000000000000000000000000000000000001"), []byte("0000000000000000000000000000000000000000000000000000000000000000001"), 5},
+		// case for function LastIndex.
+		{[]byte("000000000000000000000000000000000000000000000000000000000000000010000"), []byte("00000000000000000000000000000000000000000000000000000000000001"), 3},
+	}
+	allocs := testing.AllocsPerRun(100, func() {
+		if i := Index(allocTests[1].a, allocTests[1].b); i != allocTests[1].i {
+			t.Errorf("Index([]byte(%q), []byte(%q)) = %v; want %v", allocTests[1].a, allocTests[1].b, i, allocTests[1].i)
+		}
+		if i := LastIndex(allocTests[0].a, allocTests[0].b); i != allocTests[0].i {
+			t.Errorf("LastIndex([]byte(%q), []byte(%q)) = %v; want %v", allocTests[0].a, allocTests[0].b, i, allocTests[0].i)
+		}
+	})
+	if allocs != 0 {
+		t.Errorf("expected no allocations, got %f", allocs)
+	}
 }
 
 func runIndexAnyTests(t *testing.T, f func(s []byte, chars string) int, funcName string, testCases []BinOpTest) {
diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go
index f090d12bed..d9ba6670e8 100644
--- a/src/cmd/asm/internal/arch/arch.go
+++ b/src/cmd/asm/internal/arch/arch.go
@@ -484,6 +484,9 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
 	for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
 		register[obj.Rconv(i)] = int16(i)
 	}
+	for i := mips.REG_W0; i <= mips.REG_W31; i++ {
+		register[obj.Rconv(i)] = int16(i)
+	}
 	register["HI"] = mips.REG_HI
 	register["LO"] = mips.REG_LO
 	// Pseudo-registers.
@@ -501,6 +504,7 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
 		"FCR": true,
 		"M":   true,
 		"R":   true,
+		"W":   true,
 	}
 
 	instructions := make(map[string]obj.As)
diff --git a/src/cmd/asm/internal/arch/mips.go b/src/cmd/asm/internal/arch/mips.go
index 79fb7cf02e..5d71f40fbe 100644
--- a/src/cmd/asm/internal/arch/mips.go
+++ b/src/cmd/asm/internal/arch/mips.go
@@ -63,6 +63,10 @@ func mipsRegisterNumber(name string, n int16) (int16, bool) {
 		if 0 <= n && n <= 31 {
 			return mips.REG_R0 + n, true
 		}
+	case "W":
+		if 0 <= n && n <= 31 {
+			return mips.REG_W0 + n, true
+		}
 	}
 	return 0, false
 }
diff --git a/src/cmd/asm/internal/asm/testdata/mips64.s b/src/cmd/asm/internal/asm/testdata/mips64.s
index 2a8c288d7b..21ab82f319 100644
--- a/src/cmd/asm/internal/asm/testdata/mips64.s
+++ b/src/cmd/asm/internal/asm/testdata/mips64.s
@@ -583,6 +583,39 @@ label4:
 	NEGV	R1, R2 // 0001102f
 	RET
 
+// MSA VMOVI
+	VMOVB	$511, W0   // 7b0ff807
+	VMOVH	$24, W23   // 7b20c5c7
+	VMOVW	$-24, W15  // 7b5f43c7
+	VMOVD	$-511, W31 // 7b700fc7
+
+	VMOVB	(R0), W8       // 78000220
+	VMOVB	511(R3), W0    // 79ff1820
+	VMOVB	-512(R12), W21 // 7a006560
+	VMOVH	(R24), W12     // 7800c321
+	VMOVH	110(R19), W8   // 78379a21
+	VMOVH	-70(R12), W3   // 7bdd60e1
+	VMOVW	(R3), W31      // 78001fe2
+	VMOVW	64(R20), W16   // 7810a422
+	VMOVW	-104(R17), W24 // 7be68e22
+	VMOVD	(R3), W2       // 780018a3
+	VMOVD	128(R23), W19  // 7810bce3
+	VMOVD	-256(R31), W0  // 7be0f823
+
+	VMOVB	W8, (R0)       // 78000224
+	VMOVB	W0, 511(R3)    // 79ff1824
+	VMOVB	W21, -512(R12) // 7a006564
+	VMOVH	W12, (R24)     // 7800c325
+	VMOVH	W8, 110(R19)   // 78379a25
+	VMOVH	W3, -70(R12)   // 7bdd60e5
+	VMOVW	W31, (R3)      // 78001fe6
+	VMOVW	W16, 64(R20)   // 7810a426
+	VMOVW	W24, -104(R17) // 7be68e26
+	VMOVD	W2, (R3)       // 780018a7
+	VMOVD	W19, 128(R23)  // 7810bce7
+	VMOVD	W0, -256(R31)  // 7be0f827
+	RET
+
 // END
 //
 //	LEND	comma // asm doesn't support the trailing comma.
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index c5c604003a..6ae363be22 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -483,6 +483,64 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
 	}
 }
 
+// Signaling NaN values as constants.
+const (
+	snan32bits uint32 = 0x7f800001
+	snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+	// Make sure we generate a signaling NaN from a constant properly.
+	// See issue 36400.
+	f32 := math.Float32frombits(snan32bits)
+	g32 := math.Float32frombits(snan32bitsVar)
+	x32 := math.Float32bits(f32)
+	y32 := math.Float32bits(g32)
+	if x32 != y32 {
+		t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+	}
+
+	f64 := math.Float64frombits(snan64bits)
+	g64 := math.Float64frombits(snan64bitsVar)
+	x64 := math.Float64bits(f64)
+	y64 := math.Float64bits(g64)
+	if x64 != y64 {
+		t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+	}
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+	// Test to make sure when we convert a signaling NaN, we get a NaN.
+	// (Ideally we want a quiet NaN, but some platforms don't agree.)
+	// See issue 36399.
+	s32 := math.Float32frombits(snan32bitsVar)
+	if s32 == s32 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+	s64 := math.Float64frombits(snan64bitsVar)
+	if s64 == s64 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+	// Test to make sure when we convert a signaling NaN, it converts to a NaN.
+	// (Ideally we want a quiet NaN, but some platforms don't agree.)
+	// See issue 36399 and 36400.
+	s32 := math.Float32frombits(snan32bits)
+	if s32 == s32 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+	s64 := math.Float64frombits(snan64bits)
+	if s64 == s64 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+}
+
 var sinkFloat float64
 
 func BenchmarkMul2(b *testing.B) {
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 50b866ca65..85c857c214 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -279,7 +279,7 @@ type Arch struct {
 var thearch Arch
 
 var (
-	staticbytes,
+	staticuint64s,
 	zerobase *Node
 
 	assertE2I,
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index a710f81dc5..34adeabae1 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -1274,6 +1274,16 @@ func (s *state) stmt(n *Node) {
 		s.assign(n.Left, r, deref, skip)
 
 	case OIF:
+		if Isconst(n.Left, CTBOOL) {
+			s.stmtList(n.Left.Ninit)
+			if n.Left.Bool() {
+				s.stmtList(n.Nbody)
+			} else {
+				s.stmtList(n.Rlist)
+			}
+			break
+		}
+
 		bEnd := s.f.NewBlock(ssa.BlockPlain)
 		var likely int8
 		if n.Likely() {
@@ -2203,7 +2213,7 @@ func (s *state) expr(n *Node) *ssa.Value {
 					conv = conv1
 				}
 			}
-			if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat {
+			if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
 				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
 					conv = conv1
 				}
@@ -3269,7 +3279,7 @@ func init() {
 			}
 			return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
 		},
-		sys.AMD64, sys.I386)
+		sys.AMD64, sys.I386, sys.MIPS64)
 	add("runtime", "KeepAlive",
 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 9298d7b783..14af03f58c 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -565,7 +565,6 @@ opswitch:
 
 		n.Right = walkexpr(n.Right, &ll)
 		n.Right = addinit(n.Right, ll.Slice())
-		n = walkinrange(n, init)
 
 	case OPRINT, OPRINTN:
 		n = walkprint(n, init)
@@ -838,10 +837,12 @@ opswitch:
 			break
 		}
 
-		if staticbytes == nil {
-			staticbytes = newname(Runtimepkg.Lookup("staticbytes"))
-			staticbytes.SetClass(PEXTERN)
-			staticbytes.Type = types.NewArray(types.Types[TUINT8], 256)
+		if staticuint64s == nil {
+			staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
+			staticuint64s.SetClass(PEXTERN)
+			// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+			// individual bytes.
+			staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
 			zerobase = newname(Runtimepkg.Lookup("zerobase"))
 			zerobase.SetClass(PEXTERN)
 			zerobase.Type = types.Types[TUINTPTR]
@@ -857,9 +858,16 @@ opswitch:
 			cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
 			value = zerobase
 		case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
-			// n.Left is a bool/byte. Use staticbytes[n.Left].
+			// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
+			// and staticuint64s[n.Left * 8 + 7] on big-endian.
 			n.Left = cheapexpr(n.Left, init)
-			value = nod(OINDEX, staticbytes, byteindex(n.Left))
+			// byteindex widens n.Left so that the multiplication doesn't overflow.
+			index := nod(OLSH, byteindex(n.Left), nodintconst(3))
+			index.SetBounded(true)
+			if thearch.LinkArch.ByteOrder == binary.BigEndian {
+				index = nod(OADD, index, nodintconst(7))
+			}
+			value = nod(OINDEX, staticuint64s, index)
 			value.SetBounded(true)
 		case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
 			// n.Left is a readonly global; use it directly.
@@ -2424,15 +2432,21 @@ func convnop(n *Node, t *types.Type) *Node {
 	return n
 }
 
-// byteindex converts n, which is byte-sized, to a uint8.
-// We cannot use conv, because we allow converting bool to uint8 here,
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
 // which is forbidden in user code.
 func byteindex(n *Node) *Node {
-	if types.Identical(n.Type, types.Types[TUINT8]) {
-		return n
+	// We cannot convert from bool to int directly.
+	// While converting from int8 to int is possible, it would yield
+	// the wrong result for negative values.
+	// Reinterpreting the value as an unsigned byte solves both cases.
+	if !types.Identical(n.Type, types.Types[TUINT8]) {
+		n = nod(OCONV, n, nil)
+		n.Type = types.Types[TUINT8]
+		n.SetTypecheck(1)
 	}
 	n = nod(OCONV, n, nil)
-	n.Type = types.Types[TUINT8]
+	n.Type = types.Types[TINT]
 	n.SetTypecheck(1)
 	return n
 }
@@ -3523,133 +3537,6 @@ func (n *Node) isIntOrdering() bool {
 	return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
 }
 
-// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
-// n must be an OANDAND or OOROR node.
-// The result of walkinrange MUST be assigned back to n, e.g.
-// 	n.Left = walkinrange(n.Left)
-func walkinrange(n *Node, init *Nodes) *Node {
-	// We are looking for something equivalent to a opl b OP b opr c, where:
-	// * a, b, and c have integer type
-	// * b is side-effect-free
-	// * opl and opr are each < or ≤
-	// * OP is &&
-	l := n.Left
-	r := n.Right
-	if !l.isIntOrdering() || !r.isIntOrdering() {
-		return n
-	}
-
-	// Find b, if it exists, and rename appropriately.
-	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
-	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
-	a, opl, b := l.Left, l.Op, l.Right
-	x, opr, c := r.Left, r.Op, r.Right
-	for i := 0; ; i++ {
-		if samesafeexpr(b, x) {
-			break
-		}
-		if i == 3 {
-			// Tried all permutations and couldn't find an appropriate b == x.
-			return n
-		}
-		if i&1 == 0 {
-			a, opl, b = b, brrev(opl), a
-		} else {
-			x, opr, c = c, brrev(opr), x
-		}
-	}
-
-	// If n.Op is ||, apply de Morgan.
-	// Negate the internal ops now; we'll negate the top level op at the end.
-	// Henceforth assume &&.
-	negateResult := n.Op == OOROR
-	if negateResult {
-		opl = brcom(opl)
-		opr = brcom(opr)
-	}
-
-	cmpdir := func(o Op) int {
-		switch o {
-		case OLE, OLT:
-			return -1
-		case OGE, OGT:
-			return +1
-		}
-		Fatalf("walkinrange cmpdir %v", o)
-		return 0
-	}
-	if cmpdir(opl) != cmpdir(opr) {
-		// Not a range check; something like b < a && b < c.
-		return n
-	}
-
-	switch opl {
-	case OGE, OGT:
-		// We have something like a > b && b ≥ c.
-		// Switch and reverse ops and rename constants,
-		// to make it look like a ≤ b && b < c.
-		a, c = c, a
-		opl, opr = brrev(opr), brrev(opl)
-	}
-
-	// We must ensure that c-a is non-negative.
-	// For now, require a and c to be constants.
-	// In the future, we could also support a == 0 and c == len/cap(...).
-	// Unfortunately, by this point, most len/cap expressions have been
-	// stored into temporary variables.
-	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
-		return n
-	}
-
-	// Ensure that Int64() does not overflow on a and c (it'll happen
-	// for any const above 2**63; see issue #27143).
-	if !a.CanInt64() || !c.CanInt64() {
-		return n
-	}
-
-	if opl == OLT {
-		// We have a < b && ...
-		// We need a ≤ b && ... to safely use unsigned comparison tricks.
-		// If a is not the maximum constant for b's type,
-		// we can increment a and switch to ≤.
-		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
-			return n
-		}
-		a = nodintconst(a.Int64() + 1)
-		opl = OLE
-	}
-
-	bound := c.Int64() - a.Int64()
-	if bound < 0 {
-		// Bad news. Something like 5 <= x && x < 3.
-		// Rare in practice, and we still need to generate side-effects,
-		// so just leave it alone.
-		return n
-	}
-
-	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
-	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
-	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
-	// which is equivalent to uint(b-a) < uint(c-a).
-	ut := b.Type.ToUnsigned()
-	lhs := conv(nod(OSUB, b, a), ut)
-	rhs := nodintconst(bound)
-	if negateResult {
-		// Negate top level.
-		opr = brcom(opr)
-	}
-	cmp := nod(opr, lhs, rhs)
-	cmp.Pos = n.Pos
-	cmp = addinit(cmp, l.Ninit.Slice())
-	cmp = addinit(cmp, r.Ninit.Slice())
-	// Typecheck the AST rooted at cmp...
-	cmp = typecheck(cmp, ctxExpr)
-	// ...but then reset cmp's type to match n's type.
-	cmp.Type = n.Type
-	cmp = walkexpr(cmp, init)
-	return cmp
-}
-
 // return 1 if integer n must be in range [0, max), 0 otherwise
 func bounded(n *Node, max int64) bool {
 	if n.Type == nil || !n.Type.IsInteger() {
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 591f3666e7..ce30c9ae37 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -1328,7 +1328,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
 	case ssa.OpPPC64InvertFlags:
 		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
+	case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear:
 		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
 	case ssa.OpClobber:
 		// TODO: implement for clobberdead experiment. Nop is ok for now.
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 167c9a3411..3fece75b1b 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -314,6 +314,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
 		gc.AddAux(&p.To, v)
+	case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
+		p := s.Prog(v.Op.Asm())
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = riscv.REG_ZERO
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = v.Args[0].Reg()
+		gc.AddAux(&p.To, v)
 	case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_REG
@@ -464,7 +471,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
 	case ssa.BlockRet:
 		s.Prog(obj.ARET)
 	case ssa.BlockRetJmp:
-		p := s.Prog(obj.AJMP)
+		p := s.Prog(obj.ARET)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = b.Aux.(*obj.LSym)
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index f1725bdda4..2de3ef4b35 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -498,6 +498,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		ssa.OpS390XLDGR, ssa.OpS390XLGDR,
 		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
 		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
+		ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR,
+		ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR,
 		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
 		ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
 		ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index eadd5700ba..205fcfc707 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -232,6 +232,34 @@ func (b *Block) Reset(kind BlockKind) {
 	b.AuxInt = 0
 }
 
+// resetWithControl resets b and adds control v.
+// It is equivalent to b.Reset(kind); b.AddControl(v),
+// except that it is one call instead of two and avoids a bounds check.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl(kind BlockKind, v *Value) {
+	b.Kind = kind
+	b.ResetControls()
+	b.Aux = nil
+	b.AuxInt = 0
+	b.Controls[0] = v
+	v.Uses++
+}
+
+// resetWithControl2 resets b and adds controls v and w.
+// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w),
+// except that it is one call instead of three and avoids two bounds checks.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) {
+	b.Kind = kind
+	b.ResetControls()
+	b.Aux = nil
+	b.AuxInt = 0
+	b.Controls[0] = v
+	b.Controls[1] = w
+	v.Uses++
+	w.Uses++
+}
+
 // AddEdgeTo adds an edge from block b to block c. Used during building of the
 // SSA graph; do not use on an already-completed SSA graph.
 func (b *Block) AddEdgeTo(c *Block) {
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index c7c3f8c15f..4f9fd8e22e 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -148,7 +148,7 @@ func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
 	// the number of useless instructions executed.
 	const maxfuseinsts = 2
 
-	if len(simple.Values) > maxfuseinsts || !allTrivial(simple) {
+	if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
 		return false
 	}
 
@@ -305,10 +305,10 @@ func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
 		return false
 	}
 	yes, no := b.Succs[0].Block(), b.Succs[1].Block()
-	if !isLeafPlain(yes) || len(yes.Values) > 1 || !allTrivial(yes) {
+	if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
 		return false
 	}
-	if !isLeafPlain(no) || len(no.Values) > 1 || !allTrivial(no) {
+	if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
 		return false
 	}
 	if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
@@ -415,7 +415,15 @@ func shouldElimIfElse(no, yes, post *Block, arch string) bool {
 	}
 }
 
-func allTrivial(b *Block) bool {
+// canSpeculativelyExecute reports whether every value in the block can
+// be evaluated without causing any observable side effects (memory
+// accesses, panics and so on) except for execution time changes. It
+// also ensures that the block does not contain any phis which we can't
+// speculatively execute.
+// Warning: this function cannot currently detect values that represent
+// instructions the execution of which need to be guarded with CPU
+// hardware feature checks. See issue #34950.
+func canSpeculativelyExecute(b *Block) bool {
 	// don't fuse memory ops, Phi ops, divides (can panic),
 	// or anything else with side-effects
 	for _, v := range b.Values {
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index a6746805f7..4c694a03ac 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -141,15 +141,23 @@ func checkFunc(f *Func) {
 					f.Fatalf("bad int32 AuxInt value for %v", v)
 				}
 				canHaveAuxInt = true
-			case auxInt64, auxFloat64, auxARM64BitField:
+			case auxInt64, auxARM64BitField:
 				canHaveAuxInt = true
 			case auxInt128:
 				// AuxInt must be zero, so leave canHaveAuxInt set to false.
 			case auxFloat32:
 				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 				if !isExactFloat32(v.AuxFloat()) {
 					f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
 				}
+			case auxFloat64:
+				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 			case auxString, auxSym, auxTyp, auxArchSpecific:
 				canHaveAux = true
 			case auxSymOff, auxSymValAndOff, auxTypSize:
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 8551c0a54b..2de4e133bf 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -35,7 +35,8 @@ func Compile(f *Func) {
 
 	var rnd *rand.Rand
 	if checkEnabled {
-		rnd = rand.New(rand.NewSource(int64(crc32.ChecksumIEEE(([]byte)(f.Name)))))
+		seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed)
+		rnd = rand.New(rand.NewSource(seed))
 	}
 
 	// hook to print function & phase if panic happens
@@ -199,7 +200,10 @@ func (p *pass) addDump(s string) {
 }
 
 // Run consistency checker between each phase
-var checkEnabled = false
+var (
+	checkEnabled  = false
+	checkRandSeed = 0
+)
 
 // Debug output
 var IntrinsicsDebug int
@@ -253,7 +257,7 @@ where:
 ` + phasenames + `
 
 -  is one of:
-    on, off, debug, mem, time, test, stats, dump
+    on, off, debug, mem, time, test, stats, dump, seed
 
 -  defaults to 1
 
@@ -271,6 +275,10 @@ Examples:
     -d=ssa/check/on
 enables checking after each phase
 
+	-d=ssa/check/seed=1234
+enables checking after each phase, using 1234 to seed the PRNG
+used for value order randomization
+
     -d=ssa/all/time
 enables time reporting for all phases
 
@@ -294,6 +302,12 @@ commas. For example:
 		debugPoset = checkEnabled
 		return ""
 	}
+	if phase == "check" && flag == "seed" {
+		checkEnabled = true
+		checkRandSeed = val
+		debugPoset = checkEnabled
+		return ""
+	}
 
 	alltime := false
 	allmem := false
@@ -414,7 +428,7 @@ var passes = [...]pass{
 	{name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
 	{name: "nilcheckelim", fn: nilcheckelim},
 	{name: "prove", fn: prove},
-	{name: "fuse plain", fn: fusePlain},
+	{name: "early fuse", fn: fuseEarly},
 	{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
 	{name: "softfloat", fn: softfloat, required: true},
 	{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
@@ -422,7 +436,7 @@ var passes = [...]pass{
 	{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
 	{name: "check bce", fn: checkbce},
 	{name: "branchelim", fn: branchelim},
-	{name: "fuse", fn: fuseAll},
+	{name: "late fuse", fn: fuseLate},
 	{name: "dse", fn: dse},
 	{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
 	{name: "insert resched checks", fn: insertLoopReschedChecks,
@@ -477,7 +491,7 @@ var passOrder = [...]constraint{
 	// allow deadcode to clean up after nilcheckelim
 	{"nilcheckelim", "generic deadcode"},
 	// nilcheckelim generates sequences of plain basic blocks
-	{"nilcheckelim", "fuse"},
+	{"nilcheckelim", "late fuse"},
 	// nilcheckelim relies on opt to rewrite user nil checks
 	{"opt", "nilcheckelim"},
 	// tighten will be most effective when as many values have been removed as possible
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index c2d4051da8..f80ec0dc5d 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -8,18 +8,18 @@ import (
 	"cmd/internal/src"
 )
 
-// fusePlain runs fuse(f, fuseTypePlain).
-func fusePlain(f *Func) { fuse(f, fuseTypePlain) }
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
 
-// fuseAll runs fuse(f, fuseTypeAll).
-func fuseAll(f *Func) { fuse(f, fuseTypeAll) }
+// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf).
+func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf) }
 
 type fuseType uint8
 
 const (
 	fuseTypePlain fuseType = 1 << iota
 	fuseTypeIf
-	fuseTypeAll = fuseTypePlain | fuseTypeIf
+	fuseTypeIntInRange
 )
 
 // fuse simplifies control flow by joining basic blocks.
@@ -32,6 +32,9 @@ func fuse(f *Func, typ fuseType) {
 			if typ&fuseTypeIf != 0 {
 				changed = fuseBlockIf(b) || changed
 			}
+			if typ&fuseTypeIntInRange != 0 {
+				changed = fuseIntegerComparisons(b) || changed
+			}
 			if typ&fuseTypePlain != 0 {
 				changed = fuseBlockPlain(b) || changed
 			}
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
new file mode 100644
index 0000000000..d843fc3fda
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
+// which can be optimized to 'unsigned(x-1) < 4'.
+//
+// Look for branch structure like:
+//
+//   p
+//   |\
+//   | b
+//   |/ \
+//   s0 s1
+//
+// In our example, p has control '1 <= x', b has control 'x < 5',
+// and s0 and s1 are the if and else results of the comparison.
+//
+// This will be optimized into:
+//
+//   p
+//    \
+//     b
+//    / \
+//   s0 s1
+//
+// where b has the combined control value 'unsigned(x-1) < 4'.
+// Later passes will then fuse p and b.
+func fuseIntegerComparisons(b *Block) bool {
+	if len(b.Preds) != 1 {
+		return false
+	}
+	p := b.Preds[0].Block()
+	if b.Kind != BlockIf || p.Kind != BlockIf {
+		return false
+	}
+
+	// Don't merge control values if b is likely to be bypassed anyway.
+	if p.Likely == BranchLikely && p.Succs[0].Block() != b {
+		return false
+	}
+	if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
+		return false
+	}
+
+	// Check if the control values combine to make an integer inequality that
+	// can be further optimized later.
+	bc := b.Controls[0]
+	pc := p.Controls[0]
+	if !areMergeableInequalities(bc, pc) {
+		return false
+	}
+
+	// If the first (true) successors match then we have a disjunction (||).
+	// If the second (false) successors match then we have a conjunction (&&).
+	for i, op := range [2]Op{OpOrB, OpAndB} {
+		if p.Succs[i].Block() != b.Succs[i].Block() {
+			continue
+		}
+
+		// TODO(mundaym): should we also check the cost of executing b?
+		// Currently we might speculatively execute b even if b contains
+		// a lot of instructions. We could just check that len(b.Values)
+		// is lower than a fixed amount. Bear in mind however that the
+		// other optimization passes might yet reduce the cost of b
+		// significantly so we shouldn't be overly conservative.
+		if !canSpeculativelyExecute(b) {
+			return false
+		}
+
+		// Logically combine the control values for p and b.
+		v := b.NewValue0(bc.Pos, op, bc.Type)
+		v.AddArg(pc)
+		v.AddArg(bc)
+
+		// Set the combined control value as the control value for b.
+		b.SetControl(v)
+
+		// Modify p so that it jumps directly to b.
+		p.removeEdge(i)
+		p.Kind = BlockPlain
+		p.Likely = BranchUnknown
+		p.ResetControls()
+
+		return true
+	}
+
+	// TODO: could negate condition(s) to merge controls.
+	return false
+}
+
+// getConstIntArgIndex returns the index of the first argument that is a
+// constant integer or -1 if no such argument exists.
+func getConstIntArgIndex(v *Value) int {
+	for i, a := range v.Args {
+		switch a.Op {
+		case OpConst8, OpConst16, OpConst32, OpConst64:
+			return i
+		}
+	}
+	return -1
+}
+
+// isSignedInequality reports whether op represents the inequality < or ≤
+// in the signed domain.
+func isSignedInequality(v *Value) bool {
+	switch v.Op {
+	case OpLess64, OpLess32, OpLess16, OpLess8,
+		OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+		return true
+	}
+	return false
+}
+
+// isUnsignedInequality reports whether op represents the inequality < or ≤
+// in the unsigned domain.
+func isUnsignedInequality(v *Value) bool {
+	switch v.Op {
+	case OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+		OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U:
+		return true
+	}
+	return false
+}
+
+func areMergeableInequalities(x, y *Value) bool {
+	// We need both inequalities to be either in the signed or unsigned domain.
+	// TODO(mundaym): it would also be good to merge when we have an Eq op that
+	// could be transformed into a Less/Leq. For example in the unsigned
+	// domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
+	inequalityChecks := [...]func(*Value) bool{
+		isSignedInequality,
+		isUnsignedInequality,
+	}
+	for _, f := range inequalityChecks {
+		if !f(x) || !f(y) {
+			continue
+		}
+
+		// Check that both inequalities are comparisons with constants.
+		xi := getConstIntArgIndex(x)
+		if xi < 0 {
+			return false
+		}
+		yi := getConstIntArgIndex(y)
+		if yi < 0 {
+			return false
+		}
+
+		// Check that the non-constant arguments to the inequalities
+		// are the same.
+		return x.Args[xi^1] == y.Args[yi^1]
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index 77d2aad5c1..5fe3da93ca 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -26,7 +26,7 @@ func TestFuseEliminatesOneBranch(t *testing.T) {
 			Exit("mem")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -56,7 +56,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
 			Exit("mem")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -90,7 +90,7 @@ func TestFuseHandlesPhis(t *testing.T) {
 			Exit("mem")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -122,7 +122,7 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
 		))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for k, b := range fun.blocks {
 		if k[:1] == "z" && b.Kind != BlockInvalid {
@@ -153,7 +153,7 @@ func TestFuseSideEffects(t *testing.T) {
 			Goto("loop")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind == BlockInvalid {
@@ -196,7 +196,7 @@ func BenchmarkFuse(b *testing.B) {
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
 				fun := c.Fun("entry", blocks...)
-				fuseAll(fun.f)
+				fuseLate(fun.f)
 			}
 		})
 	}
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 78916bebc3..64a6cbaf84 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -92,6 +92,8 @@
 (Round32F ...) -> (Copy ...)
 (Round64F ...) -> (Copy ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 // Lowering shifts
 // Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
 //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 4fd13a5056..c165fed485 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -154,6 +154,8 @@
 
 (Round(32|64)F ...) -> (Copy ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 // Lowering shifts
 // Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
 //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
@@ -756,6 +758,7 @@
 (MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
 
 (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
+(ORQ x (MOVLconst [c])) -> (ORQconst [c] x)
 (ORL x (MOVLconst [c])) -> (ORLconst [c] x)
 
 (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
@@ -1305,6 +1308,15 @@
 (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
 (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
 
+// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
+// In theory this applies to any of the simplifications above,
+// but CMPQ is the only one I've actually seen occur.
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y -> (FlagEQ)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
+
 // Other known comparisons.
 (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
 (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
@@ -1478,6 +1490,12 @@
 (BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1<<uint32(c))])
 (BTCLconst [c] (MOVLconst [d])) -> (MOVLconst [d^(1<<uint32(c))])
+// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
+// but we can still constant-fold.
+// In theory this applies to any of the simplifications above,
+// but ORQ is the only one I've actually seen occur.
+(ORQ (MOVQconst [c]) (MOVQconst [d])) -> (MOVQconst [c|d])
+
 // generic simplifications
 // TODO: more of this
 (ADDQ x (NEGQ y)) -> (SUBQ x y)
@@ -1493,6 +1511,7 @@
 
 (SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)])
 (SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)])
+(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)])
 
 // Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
 (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 77e7b477c6..839d701b8c 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -207,6 +207,8 @@
 
 (Round(32|64)F ...) -> (Copy ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 // fused-multiply-add
 (FMA x y z) -> (FMULAD z x y)
 
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 4e0ab3288d..61994a15a1 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -244,6 +244,8 @@
 (Cvt32Fto64F ...) -> (FCVTSD ...)
 (Cvt64Fto32F ...) -> (FCVTDS ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round32F ...) -> (LoweredRound32F ...)
 (Round64F ...) -> (LoweredRound64F ...)
 
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 228d5ee454..9ac8e5f471 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -170,6 +170,8 @@
 (Cvt32Fto64F ...) -> (MOVFD ...)
 (Cvt64Fto32F ...) -> (MOVDF ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round(32|64)F ...) -> (Copy ...)
 
 // comparisons
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 35c65023cd..be05dc71c0 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -11,6 +11,8 @@
 (Mul(64|32|16|8) x y) -> (Select1 (MULVU x y))
 (Mul(32|64)F ...) -> (MUL(F|D) ...)
 (Mul64uhilo ...) -> (MULVU ...)
+(Select0 (Mul64uover x y)) -> (Select1 <typ.UInt64> (MULVU x y))
+(Select1 (Mul64uover x y)) -> (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
 
 (Hmul64 x y) -> (Select0 (MULV x y))
 (Hmul64u x y) -> (Select0 (MULVU x y))
@@ -171,6 +173,8 @@
 (Cvt32Fto64F ...) -> (MOVFD ...)
 (Cvt64Fto32F ...) -> (MOVDF ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round(32|64)F ...) -> (Copy ...)
 
 // comparisons
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index d4ef49e20b..c53ec0fde1 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -59,6 +59,8 @@
 (Cvt32Fto64F ...) -> (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
 (Cvt64Fto32F ...) -> (FRSP ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
 
 (Sqrt ...) -> (FSQRT ...)
@@ -78,7 +80,7 @@
 
 // Constant folding
 (FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
-(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
 (FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
 (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
@@ -116,47 +118,22 @@
 (ROTLW  x (MOVDconst [c])) -> (ROTLWconst  x [c&31])
 (ROTL   x (MOVDconst [c])) -> (ROTLconst   x [c&63])
 
-(Lsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
-(Rsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
-(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
-(Lsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
-(Rsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
-(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
-(Lsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
-(Rsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SLWconst x [c])
-(Rsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
-(Rsh8Ux64  x (Const64 [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
-
-(Lsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
-(Rsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
-(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
-(Lsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
-(Rsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
-(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
-(Lsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
-(Rsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SLWconst x [c])
-(Rsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
-(Rsh8Ux32  x (Const64 [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
 
 // large constant shifts
-(Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
-(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
-(Lsh32x64  _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
-(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
-(Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
-(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
-(Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
-(Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
+(Lsh64x64  _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
+(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
+(Lsh32x64  _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
+(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
+(Lsh16x64  _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
+(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
+(Lsh8x64   _ (MOVDconst [c])) && uint64(c) >= 8  -> (MOVDconst [0])
+(Rsh8Ux64  _ (MOVDconst [c])) && uint64(c) >= 8  -> (MOVDconst [0])
 
 // large constant signed right shift, we leave the sign bit
-(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
-(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
-(Rsh8x64  x (Const64 [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRADconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
+(Rsh8x64  x (MOVDconst [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])
 
 // constant shifts
 (Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
@@ -297,11 +274,13 @@
 (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
 (ORN x (MOVDconst [-1])) -> x
 
-// Potentially useful optimizing rewrites.
-// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
-// (ADDconstForCarry [k] c), K < 0 && (c >= 0 && k+c < 0) -> CarryClear
-// (MaskIfNotCarry CarrySet) -> 0
-// (MaskIfNotCarry CarrySet) -> -1
+(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && (int64(int16(c)) <  0 || int64(int16(c)) + d >= 0) -> (FlagCarryClear)
+(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 &&  int64(int16(c)) >= 0 && int64(int16(c)) + d <  0  -> (FlagCarrySet)
+
+(MaskIfNotCarry (FlagCarrySet)) -> (MOVDconst [0])
+(MaskIfNotCarry (FlagCarryClear)) -> (MOVDconst [-1])
+
+(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) -> (S(RAD|RAW|RD|RW|LD|LW)const [c] x)
 
 (Addr ...) -> (MOVDaddr ...)
 (LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
@@ -662,6 +641,9 @@
 (AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
 (OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
 (XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
+(ORN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|^d])
+(ANDN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&^d])
+(NOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [^(c|d)])
 
 // Discover consts
 (AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index 6660b921ef..d0a22c1f20 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -205,7 +205,7 @@ func init() {
 		{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
 
 		{name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true},                                                                     // arg0 + arg1 + carry, returns (sum, carry)
-		{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux
+		{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint
 		{name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"},                                                                   // carry - 1 (if carry then 0 else -1)
 
 		{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits
@@ -588,10 +588,11 @@ func init() {
 
 		// These ops are for temporary use by rewrite rules. They
 		// cannot appear in the generated assembly.
-		{name: "FlagEQ"}, // equal
-		{name: "FlagLT"}, // signed < or unsigned <
-		{name: "FlagGT"}, // signed > or unsigned >
-
+		{name: "FlagEQ"},         // equal
+		{name: "FlagLT"},         // signed < or unsigned <
+		{name: "FlagGT"},         // signed > or unsigned >
+		{name: "FlagCarrySet"},   // carry flag set
+		{name: "FlagCarryClear"}, // carry flag clear
 	}
 
 	blocks := []blockData{
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 720724647e..9b88b56871 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -131,6 +131,8 @@
 (Cvt32Fto64F ...) -> (FCVTDS ...)
 (Cvt64Fto32F ...) -> (FCVTSD ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round32F ...) -> (Copy ...)
 (Round64F ...) -> (Copy ...)
 
@@ -325,6 +327,14 @@
 	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
 	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 
 (MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
 	(MOVBUload [off1+off2] {sym} base mem)
@@ -349,6 +359,10 @@
 	(MOVWstore [off1+off2] {sym} base val mem)
 (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
 	(MOVDstore [off1+off2] {sym} base val mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
 
 // Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
 // with OffPtr -> ADDI.
@@ -436,9 +450,6 @@
 (MOVDconst <t> [c]) && !is32Bit(c) && int32(c) <  0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
 (MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
 
-// Fold ADD+MOVDconst into ADDI where possible.
-(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
-
 (Addr ...) -> (MOVaddr ...)
 (LocalAddr {sym} base _) -> (MOVaddr {sym} base)
 
@@ -457,5 +468,34 @@
 (ClosureCall ...) -> (CALLclosure ...)
 (InterCall   ...) -> (CALLinter   ...)
 
+// Optimizations
+
+// Absorb SNEZ into branch.
+(BNE (SNEZ x) yes no) -> (BNE x yes no)
+
+// Store zero
+(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
+
+// Fold ADD+MOVDconst into ADDI where possible.
+(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
+
+// Convert subtraction of a const into ADDI with negative immediate, where possible.
+(SUB x (MOVBconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVHconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+
+// Subtraction of zero.
+(SUB x (MOVBconst [0])) -> x
+(SUB x (MOVHconst [0])) -> x
+(SUB x (MOVWconst [0])) -> x
+(SUB x (MOVDconst [0])) -> x
+
+// Subtraction of zero with sign extension.
+(SUBW x (MOVWconst [0])) -> (ADDIW [0] x)
+
 // remove redundant *const ops
 (ADDI [0]  x) -> x
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
index 7829f9a07c..28a91d559f 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -106,12 +106,13 @@ func init() {
 	callerSave := gpMask | fpMask | regNamed["g"]
 
 	var (
-		gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
-		gp01    = regInfo{outputs: []regMask{gpMask}}
-		gp11    = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
-		gp21    = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
-		gpload  = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
-		gp11sb  = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+		gpstore  = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
+		gpstore0 = regInfo{inputs: []regMask{gpspsbMask}}
+		gp01     = regInfo{outputs: []regMask{gpMask}}
+		gp11     = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+		gp21     = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+		gpload   = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+		gp11sb   = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
 
 		fp11    = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
 		fp21    = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
@@ -171,6 +172,12 @@ func init() {
 		{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
 		{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // 64 bits
 
+		// Stores: store <size> of zero in arg0+auxint+aux; arg1=mem
+		{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, //  8 bits
+		{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+		{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+		{name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // 64 bits
+
 		// Shift ops
 		{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"},                 // arg0 << aux1
 		{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"},                 // arg0 >> aux1, signed
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 30a0249759..5cff8df3a4 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -227,9 +227,23 @@
 (Cvt64Fto32 ...) -> (CFDBRA ...)
 (Cvt64Fto64 ...) -> (CGDBRA ...)
 
+// Lowering float <-> uint
+(Cvt32Uto32F ...) -> (CELFBR ...)
+(Cvt32Uto64F ...) -> (CDLFBR ...)
+(Cvt64Uto32F ...) -> (CELGBR ...)
+(Cvt64Uto64F ...) -> (CDLGBR ...)
+
+(Cvt32Fto32U ...) -> (CLFEBR ...)
+(Cvt32Fto64U ...) -> (CLGEBR ...)
+(Cvt64Fto32U ...) -> (CLFDBR ...)
+(Cvt64Fto64U ...) -> (CLGDBR ...)
+
+// Lowering float32 <-> float64
 (Cvt32Fto64F ...) -> (LDEBR ...)
 (Cvt64Fto32F ...) -> (LEDBR ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
 
 // Lowering shifts
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index 6517957fd4..819046d30c 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -401,8 +401,17 @@ func init() {
 		{name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA"}, // convert int32 to float64
 		{name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA"}, // convert int64 to float32
 		{name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA"}, // convert int64 to float64
-		{name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"},   // convert float64 to float32
-		{name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"},   // convert float32 to float64
+		{name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR"}, // convert float32 to uint32
+		{name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR"}, // convert float64 to uint32
+		{name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR"}, // convert float32 to uint64
+		{name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR"}, // convert float64 to uint64
+		{name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR"}, // convert uint32 to float32
+		{name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR"}, // convert uint32 to float64
+		{name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR"}, // convert uint64 to float32
+		{name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR"}, // convert uint64 to float64
+
+		{name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
+		{name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
 
 		{name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux
 		{name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"},                    // arg0 + arg1 + auxint + aux
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index 010adcb095..bf2b904baf 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -91,6 +91,8 @@
 (Cvt32Fto64F ...) -> (F64PromoteF32 ...)
 (Cvt64Fto32F ...) -> (F32DemoteF64 ...)
 
+(CvtBoolToUint8 ...) -> (Copy ...)
+
 (Round32F ...) -> (Copy ...)
 (Round64F ...) -> (Copy ...)
 
@@ -355,7 +357,7 @@
 (I64Or  (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
 (I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
 (F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
-(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 (I64Eq  (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
 (I64Eq  (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
 (I64Ne  (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
@@ -365,15 +367,16 @@
 (I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) >> uint64(y))])
 (I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)])
 
-(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
-(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
-(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
-(I64Or  (I64Const [x]) y) -> (I64Or  y (I64Const [x]))
-(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
-(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
-(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
-(I64Eq  (I64Const [x]) y) -> (I64Eq y  (I64Const [x]))
-(I64Ne  (I64Const [x]) y) -> (I64Ne y  (I64Const [x]))
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x]))
+(I64Or  (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or  y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x]))
+(I64Eq  (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y  (I64Const [x]))
+(I64Ne  (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y  (I64Const [x]))
 
 (I64Eq x (I64Const [0])) -> (I64Eqz x)
 (I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x))
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index f4d487176b..8ec22d86e7 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -56,6 +56,7 @@
 (Cvt64Fto64  (Const64F [c])) -> (Const64  [int64(auxTo64F(c))])
 (Round32F x:(Const32F)) -> x
 (Round64F x:(Const64F)) -> x
+(CvtBoolToUint8 (ConstBool [c])) -> (Const8 [c])
 
 (Trunc16to8  (ZeroExt8to16  x)) -> x
 (Trunc32to8  (ZeroExt8to32  x)) -> x
@@ -118,8 +119,8 @@
 (Mul16  (Const16 [c])  (Const16 [d]))  -> (Const16 [int64(int16(c*d))])
 (Mul32  (Const32 [c])  (Const32 [d]))  -> (Const32 [int64(int32(c*d))])
 (Mul64  (Const64 [c])  (Const64 [d]))  -> (Const64 [c*d])
-(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
-(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
+(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
+(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 
 (And8   (Const8 [c])   (Const8 [d]))   -> (Const8  [int64(int8(c&d))])
 (And16  (Const16 [c])  (Const16 [d]))  -> (Const16 [int64(int16(c&d))])
@@ -144,8 +145,8 @@
 (Div16u (Const16 [c])  (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
 (Div32u (Const32 [c])  (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
 (Div64u (Const64 [c])  (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
-(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
-(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
+(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 (Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y)
 (Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y)
 
@@ -253,6 +254,54 @@
 (Neq16 (Const16  [c]) (Add16 (Const16  [d]) x)) -> (Neq16 (Const16  [int64(int16(c-d))]) x)
 (Neq8  (Const8   [c]) (Add8  (Const8   [d]) x)) -> (Neq8 (Const8  [int64(int8(c-d))]) x)
 
+// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
+(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c -> ((Less|Leq)64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c -> ((Less|Leq)32U (Sub32  x (Const32  [c])) (Const32  [d-c]))
+(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c -> ((Less|Leq)16U (Sub16  x (Const16  [c])) (Const16  [d-c]))
+(AndB (Leq8  (Const8  [c]) x) ((Less|Leq)8  x (Const8  [d]))) && d >= c -> ((Less|Leq)8U  (Sub8   x (Const8   [c])) (Const8   [d-c]))
+
+// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
+(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && int64(c+1) > int64(c) -> ((Less|Leq)64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && int32(c+1) > int32(c) -> ((Less|Leq)32U (Sub32  x (Const32  [c+1])) (Const32  [d-c-1]))
+(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && int16(c+1) > int16(c) -> ((Less|Leq)16U (Sub16  x (Const16  [c+1])) (Const16  [d-c-1]))
+(AndB (Less8  (Const8  [c]) x) ((Less|Leq)8  x (Const8  [d]))) && d >= c+1 && int8(c+1)  > int8(c)  -> ((Less|Leq)8U  (Sub8   x (Const8   [c+1])) (Const8   [d-c-1]))
+
+// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
+(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) -> ((Less|Leq)64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) -> ((Less|Leq)32U (Sub32  x (Const32  [c])) (Const32  [int64(int32(d-c))]))
+(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) -> ((Less|Leq)16U (Sub16  x (Const16  [c])) (Const16  [int64(int16(d-c))]))
+(AndB (Leq8U  (Const8  [c]) x) ((Less|Leq)8U  x (Const8  [d]))) && uint8(d)  >= uint8(c)  -> ((Less|Leq)8U  (Sub8   x (Const8   [c])) (Const8   [int64(int8(d-c))]))
+
+// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
+(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) -> ((Less|Leq)64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) -> ((Less|Leq)32U (Sub32  x (Const32  [int64(int32(c+1))])) (Const32  [int64(int32(d-c-1))]))
+(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) -> ((Less|Leq)16U (Sub16  x (Const16  [int64(int16(c+1))])) (Const16  [int64(int16(d-c-1))]))
+(AndB (Less8U  (Const8  [c]) x) ((Less|Leq)8U  x (Const8  [d]))) && uint8(d)  >= uint8(c+1)  && uint8(c+1)  > uint8(c)  -> ((Less|Leq)8U  (Sub8   x (Const8   [int64(int8(c+1))]))  (Const8   [int64(int8(d-c-1))]))
+
+// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d -> ((Less|Leq)64U (Const64  [c-d]) (Sub64  x (Const64  [d])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d -> ((Less|Leq)32U (Const32  [c-d]) (Sub32  x (Const32  [d])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d -> ((Less|Leq)16U (Const16  [c-d]) (Sub16  x (Const16  [d])))
+(OrB ((Less|Leq)8  (Const8  [c]) x) (Less8  x (Const8  [d]))) && c >= d -> ((Less|Leq)8U  (Const8   [c-d]) (Sub8   x (Const8   [d])))
+
+// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && int64(d+1) > int64(d) -> ((Less|Leq)64U (Const64  [c-d-1]) (Sub64  x (Const64  [d+1])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && int32(d+1) > int32(d) -> ((Less|Leq)32U (Const32  [c-d-1]) (Sub32  x (Const32  [d+1])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && int16(d+1) > int16(d) -> ((Less|Leq)16U (Const16  [c-d-1]) (Sub16  x (Const16  [d+1])))
+(OrB ((Less|Leq)8  (Const8  [c]) x) (Leq8  x (Const8  [d]))) && c >= d+1 && int8(d+1)  > int8(d)  -> ((Less|Leq)8U  (Const8   [c-d-1]) (Sub8   x (Const8   [d+1])))
+
+// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) -> ((Less|Leq)64U (Const64                [c-d]) (Sub64  x (Const64  [d])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) -> ((Less|Leq)32U (Const32  [int64(int32(c-d))]) (Sub32  x (Const32  [d])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) -> ((Less|Leq)16U (Const16  [int64(int16(c-d))]) (Sub16  x (Const16  [d])))
+(OrB ((Less|Leq)8U  (Const8  [c]) x) (Less8U  x (Const8  [d]))) && uint8(c)  >= uint8(d)  -> ((Less|Leq)8U  (Const8   [int64( int8(c-d))]) (Sub8   x (Const8   [d])))
+
+// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) -> ((Less|Leq)64U (Const64                [c-d-1]) (Sub64  x (Const64  [d+1])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) -> ((Less|Leq)32U (Const32  [int64(int32(c-d-1))]) (Sub32  x (Const32  [int64(int32(d+1))])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) -> ((Less|Leq)16U (Const16  [int64(int16(c-d-1))]) (Sub16  x (Const16  [int64(int16(d+1))])))
+(OrB ((Less|Leq)8U  (Const8  [c]) x) (Leq8U  x (Const8  [d]))) && uint8(c)  >= uint8(d+1)  && uint8(d+1)  > uint8(d)  -> ((Less|Leq)8U  (Const8   [int64( int8(c-d-1))]) (Sub8   x (Const8   [int64( int8(d+1))])))
+
 // Canonicalize x-const to x+(-const)
 (Sub64 x (Const64  [c])) && x.Op != OpConst64 -> (Add64 (Const64  [-c]) x)
 (Sub32 x (Const32  [c])) && x.Op != OpConst32 -> (Add32 (Const32  [int64(int32(-c))]) x)
@@ -574,8 +623,8 @@
 	-> x
 
 // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
-(Load  p1 (Store {t2} p2 (Const64  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
-(Load  p1 (Store {t2} p2 (Const32  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
+        (Load  p1 (Store {t2} p2 (Const64  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x])
+        (Load  p1 (Store {t2} p2 (Const32  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 (Load  p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1)   -> (Const64  [x])
 (Load  p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)   -> (Const32  [int64(int32(math.Float32bits(auxTo32F(x))))])
 
@@ -1844,7 +1893,7 @@
 (Div32F x (Const32F  [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F  [auxFrom32F(1/auxTo32F(c))]))
 (Div64F x (Const64F  [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F  [auxFrom64F(1/auxTo64F(c))]))
 
-(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 
 // recognize runtime.newobject and don't Zero/Nilcheck it
 (Zero (Load (OffPtr [c] (SP)) mem) mem)
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 9f17299610..b7e91a1f20 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -323,7 +323,12 @@ var genericOps = []opData{
 	{name: "Const32", aux: "Int32"},      // auxint is sign-extended 32 bits
 	// Note: ConstX are sign-extended even when the type of the value is unsigned.
 	// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
-	{name: "Const64", aux: "Int64"},    // value is auxint
+	{name: "Const64", aux: "Int64"}, // value is auxint
+	// Note: for both Const32F and Const64F, we disallow encoding NaNs.
+	// Signaling NaNs are tricky because if you do anything with them, they become quiet.
+	// Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+	// See issue 36399 and 36400.
+	// Encodings of +inf, -inf, and -0 are fine.
 	{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
 	{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
 	{name: "ConstInterface"},           // nil interface
@@ -418,6 +423,7 @@ var genericOps = []opData{
 	{name: "Cvt64Fto64", argLength: 1},
 	{name: "Cvt32Fto64F", argLength: 1},
 	{name: "Cvt64Fto32F", argLength: 1},
+	{name: "CvtBoolToUint8", argLength: 1},
 
 	// Force rounding to precision of type.
 	{name: "Round32F", argLength: 1},
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 2a10f2fa25..8e88d0b6a3 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -891,7 +891,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 	}
 
 	blockName, _ := getBlockInfo(outop, arch)
-	rr.add(stmtf("b.Reset(%s)", blockName))
+	var genControls [2]string
 	for i, control := range t[:outdata.controls] {
 		// Select a source position for any new control values.
 		// TODO: does it always make sense to use the source position
@@ -904,9 +904,19 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 		}
 
 		// Generate a new control value (or copy an existing value).
-		v := genResult0(rr, arch, control, false, false, newpos)
-		rr.add(stmtf("b.AddControl(%s)", v))
+		genControls[i] = genResult0(rr, arch, control, false, false, newpos)
 	}
+	switch outdata.controls {
+	case 0:
+		rr.add(stmtf("b.Reset(%s)", blockName))
+	case 1:
+		rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0]))
+	case 2:
+		rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1]))
+	default:
+		log.Fatalf("too many controls: %d", outdata.controls)
+	}
+
 	if auxint != "" {
 		rr.add(stmtf("b.AuxInt = %s", auxint))
 	}
@@ -991,16 +1001,21 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
 		}
 	}
 
-	// Access last argument first to minimize bounds checks.
-	if n := len(args); n > 1 && !pregenTop {
-		a := args[n-1]
-		if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
-			rr.add(declf(a, "%s.Args[%d]", v, n-1))
-
-			// delete the last argument so it is not reprocessed
-			args = args[:n-1]
-		} else {
-			rr.add(stmtf("_ = %s.Args[%d]", v, n-1))
+	if !pregenTop {
+		// Access last argument first to minimize bounds checks.
+		for n := len(args) - 1; n > 0; n-- {
+			a := args[n]
+			if a == "_" {
+				continue
+			}
+			if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
+				rr.add(declf(a, "%s.Args[%d]", v, n))
+				// delete the last argument so it is not reprocessed
+				args = args[:n]
+			} else {
+				rr.add(stmtf("_ = %s.Args[%d]", v, n))
+			}
+			break
 		}
 	}
 	if commutative && !pregenTop {
@@ -1093,9 +1108,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
 			// It in not safe in general to move a variable between blocks
 			// (and particularly not a phi node).
 			// Introduce a copy.
-			rr.add(stmtf("v.reset(OpCopy)"))
-			rr.add(stmtf("v.Type = %s.Type", result))
-			rr.add(stmtf("v.AddArg(%s)", result))
+			rr.add(stmtf("v.copyOf(%s)", result))
 		}
 		return result
 	}
@@ -1123,8 +1136,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
 		rr.add(declf(v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ))
 		if move && top {
 			// Rewrite original into a copy
-			rr.add(stmtf("v.reset(OpCopy)"))
-			rr.add(stmtf("v.AddArg(%s)", v))
+			rr.add(stmtf("v.copyOf(%s)", v))
 		}
 	}
 
@@ -1134,11 +1146,21 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
 	if aux != "" {
 		rr.add(stmtf("%s.Aux = %s", v, aux))
 	}
-	for _, arg := range args {
+	all := new(strings.Builder)
+	for i, arg := range args {
 		x := genResult0(rr, arch, arg, false, move, pos)
-		rr.add(stmtf("%s.AddArg(%s)", v, x))
+		if i > 0 {
+			all.WriteString(", ")
+		}
+		all.WriteString(x)
+	}
+	switch len(args) {
+	case 0:
+	case 1:
+		rr.add(stmtf("%s.AddArg(%s)", v, all.String()))
+	default:
+		rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String()))
 	}
-
 	return v
 }
 
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 1e76a673ef..c384817d0c 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -93,7 +93,7 @@ td > h2 {
 td.collapsed {
     font-size: 12px;
     width: 12px;
-    border: 0px;
+    border: 1px solid white;
     padding: 0;
     cursor: pointer;
     background: #fafafa;
@@ -247,18 +247,61 @@ svg {
     outline: 1px solid #eee;
 }
 
-.highlight-aquamarine     { background-color: aquamarine; }
-.highlight-coral          { background-color: coral; }
-.highlight-lightpink      { background-color: lightpink; }
-.highlight-lightsteelblue { background-color: lightsteelblue; }
-.highlight-palegreen      { background-color: palegreen; }
-.highlight-skyblue        { background-color: skyblue; }
-.highlight-lightgray      { background-color: lightgray; }
-.highlight-yellow         { background-color: yellow; }
-.highlight-lime           { background-color: lime; }
-.highlight-khaki          { background-color: khaki; }
-.highlight-aqua           { background-color: aqua; }
-.highlight-salmon         { background-color: salmon; }
+body.darkmode {
+    background-color: rgb(21, 21, 21);
+    color: rgb(230, 255, 255);
+    opacity: 100%;
+}
+
+td.darkmode {
+    background-color: rgb(21, 21, 21);
+    border: 1px solid gray;
+}
+
+body.darkmode table, th {
+    border: 1px solid gray;
+}
+
+.highlight-aquamarine     { background-color: aquamarine; color: black; }
+.highlight-coral          { background-color: coral; color: black; }
+.highlight-lightpink      { background-color: lightpink; color: black; }
+.highlight-lightsteelblue { background-color: lightsteelblue; color: black; }
+.highlight-palegreen      { background-color: palegreen; color: black; }
+.highlight-skyblue        { background-color: skyblue; color: black; }
+.highlight-lightgray      { background-color: lightgray; color: black; }
+.highlight-yellow         { background-color: yellow; color: black; }
+.highlight-lime           { background-color: lime; color: black; }
+.highlight-khaki          { background-color: khaki; color: black; }
+.highlight-aqua           { background-color: aqua; color: black; }
+.highlight-salmon         { background-color: salmon; color: black; }
+
+/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */
+.dead-value span.highlight-aquamarine,
+.dead-block.highlight-aquamarine,
+.dead-value span.highlight-coral,
+.dead-block.highlight-coral,
+.dead-value span.highlight-lightpink,
+.dead-block.highlight-lightpink,
+.dead-value span.highlight-lightsteelblue,
+.dead-block.highlight-lightsteelblue,
+.dead-value span.highlight-palegreen,
+.dead-block.highlight-palegreen,
+.dead-value span.highlight-skyblue,
+.dead-block.highlight-skyblue,
+.dead-value span.highlight-lightgray,
+.dead-block.highlight-lightgray,
+.dead-value span.highlight-yellow,
+.dead-block.highlight-yellow,
+.dead-value span.highlight-lime,
+.dead-block.highlight-lime,
+.dead-value span.highlight-khaki,
+.dead-block.highlight-khaki,
+.dead-value span.highlight-aqua,
+.dead-block.highlight-aqua,
+.dead-value span.highlight-salmon,
+.dead-block.highlight-salmon {
+    color: gray;
+}
 
 .outline-blue           { outline: blue solid 2px; }
 .outline-red            { outline: red solid 2px; }
@@ -284,6 +327,10 @@ ellipse.outline-teal           { stroke-width: 2px; stroke: teal; }
 ellipse.outline-maroon         { stroke-width: 2px; stroke: maroon; }
 ellipse.outline-black          { stroke-width: 2px; stroke: black; }
 
+/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */
+body.darkmode .outline-black        { outline: gray solid 2px; }
+body.darkmode ellipse.outline-black { outline: gray solid 2px; }
+
 
 
 
+}
+
+function toggleDarkMode() {
+    document.body.classList.toggle('darkmode');
+
+    const collapsedEls = document.getElementsByClassName('collapsed');
+    const len = collapsedEls.length;
+
+    for (let i = 0; i < len; i++) {
+        collapsedEls[i].classList.toggle('darkmode');
+    }
+}
+
+
 
 `)
 	w.WriteString("")
@@ -616,6 +681,8 @@ Edge with a dot means that this edge follows the order in which blocks were laid
 

+ + `) w.WriteString("") w.WriteString("") diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go index f728e8ee25..16d94614d8 100644 --- a/src/cmd/compile/internal/ssa/nilcheck_test.go +++ b/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -374,7 +374,7 @@ func TestNilcheckUser(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) @@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fusePlain(fun.f) + fuse(fun.f, 
fuseTypePlain) deadcode(fun.f) CheckFunc(fun.f) diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go index 3d77fe5bb4..f4e62b88c4 100644 --- a/src/cmd/compile/internal/ssa/numberlines.go +++ b/src/cmd/compile/internal/ssa/numberlines.go @@ -66,12 +66,9 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int { return i } -// notStmtBoundary indicates which value opcodes can never be a statement -// boundary because they don't correspond to a user's understanding of a -// statement boundary. Called from *Value.reset(), and *Func.newValue(), -// located here to keep all the statement boundary heuristics in one place. -// Note: *Value.reset() filters out OpCopy because of how that is used in -// rewrite. +// notStmtBoundary reports whether a value with opcode op can never be a statement +// boundary. Such values don't correspond to a user's understanding of a +// statement boundary. func notStmtBoundary(op Op) bool { switch op { case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg: diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1111316d9b..9da7376a8a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1881,6 +1881,8 @@ const ( OpPPC64FlagEQ OpPPC64FlagLT OpPPC64FlagGT + OpPPC64FlagCarrySet + OpPPC64FlagCarryClear OpRISCV64ADD OpRISCV64ADDI @@ -1915,6 +1917,10 @@ const ( OpRISCV64MOVHstore OpRISCV64MOVWstore OpRISCV64MOVDstore + OpRISCV64MOVBstorezero + OpRISCV64MOVHstorezero + OpRISCV64MOVWstorezero + OpRISCV64MOVDstorezero OpRISCV64SLL OpRISCV64SRA OpRISCV64SRL @@ -2113,6 +2119,14 @@ const ( OpS390XCDFBRA OpS390XCEGBRA OpS390XCDGBRA + OpS390XCLFEBR + OpS390XCLFDBR + OpS390XCLGEBR + OpS390XCLGDBR + OpS390XCELFBR + OpS390XCDLFBR + OpS390XCELGBR + OpS390XCDLGBR OpS390XLEDBR OpS390XLDEBR OpS390XMOVDaddr @@ -2584,6 +2598,7 @@ const ( OpCvt64Fto64 OpCvt32Fto64F OpCvt64Fto32F + OpCvtBoolToUint8 
OpRound32F OpRound64F OpIsNonNil @@ -24986,6 +25001,16 @@ var opcodeTable = [...]opInfo{ argLen: 0, reg: regInfo{}, }, + { + name: "FlagCarrySet", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagCarryClear", + argLen: 0, + reg: regInfo{}, + }, { name: "ADD", @@ -25462,6 +25487,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, { name: "SLL", argLen: 2, @@ -28313,6 +28390,110 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CLFEBR", + argLen: 1, + asm: s390x.ACLFEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + 
name: "CLFDBR", + argLen: 1, + asm: s390x.ACLFDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGEBR", + argLen: 1, + asm: s390x.ACLGEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGDBR", + argLen: 1, + asm: s390x.ACLGDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CELFBR", + argLen: 1, + asm: s390x.ACELFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLFBR", + argLen: 1, + asm: s390x.ACDLFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CELGBR", + argLen: 1, + asm: s390x.ACELGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLGBR", + argLen: 1, + asm: s390x.ACDLGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "LEDBR", argLen: 1, @@ -32561,6 +32742,11 @@ var opcodeTable = 
[...]opInfo{ argLen: 1, generic: true, }, + { + name: "CvtBoolToUint8", + argLen: 1, + generic: true, + }, { name: "Round32F", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go index cc3319e188..8643fa584c 100644 --- a/src/cmd/compile/internal/ssa/phiopt.go +++ b/src/cmd/compile/internal/ssa/phiopt.go @@ -148,6 +148,13 @@ func phioptint(v *Value, b0 *Block, reverse int) { negate = !negate } + a := b0.Controls[0] + if negate { + a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a) + } + v.AddArg(a) + + cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, a.Type, a) switch v.Type.Size() { case 1: v.reset(OpCopy) @@ -160,12 +167,7 @@ func phioptint(v *Value, b0 *Block, reverse int) { default: v.Fatalf("bad int size %d", v.Type.Size()) } - - a := b0.Controls[0] - if negate { - a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a) - } - v.AddArg(a) + v.AddArg(cvt) f := b0.Func if f.pass.debug > 0 { diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index fcbb76cf34..238e243096 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool { // auxFrom64F encodes a float64 value so it can be stored in an AuxInt. func auxFrom64F(f float64) int64 { + if f != f { + panic("can't encode a NaN in AuxInt field") + } return int64(math.Float64bits(f)) } // auxFrom32F encodes a float32 value so it can be stored in an AuxInt. 
func auxFrom32F(f float32) int64 { + if f != f { + panic("can't encode a NaN in AuxInt field") + } return int64(math.Float64bits(extend32Fto64F(f))) } diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index cf9a7362a2..8b2da94c13 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -408,6 +408,9 @@ func rewriteValue386(v *Value) bool { case OpCvt64Fto32F: v.Op = Op386CVTSD2SS return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: v.Op = Op386DIVW return true @@ -788,8 +791,7 @@ func rewriteValue386_Op386ADCL(v *Value) bool { f := v_2 v.reset(Op386ADCLconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(f) + v.AddArg2(x, f) return true } break @@ -899,8 +901,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -915,8 +916,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -931,8 +931,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -950,8 +949,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { continue } v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -973,8 +971,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1_1 v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -992,8 +989,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { y := v_1 v.reset(Op386LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1016,8 +1012,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1042,9 +1037,7 @@ 
func rewriteValue386_Op386ADDL(v *Value) bool { v.reset(Op386ADDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1070,10 +1063,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -1088,8 +1078,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SUBL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1130,8 +1119,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { x := v_0.Args[0] v.reset(Op386LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL [d] {s} x)) @@ -1172,8 +1160,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) @@ -1194,8 +1181,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) @@ -1216,8 +1202,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) @@ -1238,8 +1223,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] x) @@ -1251,9 +1235,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDLconst [c] (MOVLconst [d])) @@ -1307,8 +1289,7 @@ func 
rewriteValue386_Op386ADDLconstmodify(v *Value) bool { v.reset(Op386ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -1330,8 +1311,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool { v.reset(Op386ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -1361,9 +1341,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ADDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -1385,9 +1363,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ADDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -1410,9 +1386,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -1442,9 +1416,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool { v.reset(Op386ADDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -1467,9 +1439,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool { v.reset(Op386ADDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - 
v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -1493,10 +1463,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -1528,10 +1495,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ADDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -1554,10 +1518,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ADDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -1581,10 +1542,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -1614,9 +1572,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -1639,9 +1595,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -1673,10 +1627,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool 
{ v.reset(Op386ADDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ADDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -1699,10 +1650,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ADDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -1726,10 +1674,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ADDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -1751,9 +1696,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -1783,9 +1726,7 @@ func rewriteValue386_Op386ADDSD(v *Value) bool { v.reset(Op386ADDSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1817,9 +1758,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool { v.reset(Op386ADDSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -1842,9 +1781,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool { v.reset(Op386ADDSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -1874,9 +1811,7 @@ func 
rewriteValue386_Op386ADDSS(v *Value) bool { v.reset(Op386ADDSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1908,9 +1843,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool { v.reset(Op386ADDSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -1933,9 +1866,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool { v.reset(Op386ADDSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -1979,9 +1910,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool { v.reset(Op386ANDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2007,10 +1936,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -2022,9 +1948,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -2066,9 +1990,7 @@ func rewriteValue386_Op386ANDLconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDLconst [c] (MOVLconst [d])) @@ -2108,8 +2030,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool { v.reset(Op386ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -2131,8 +2052,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool { 
v.reset(Op386ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2162,9 +2082,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ANDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -2186,9 +2104,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ANDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -2211,9 +2127,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -2243,9 +2157,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool { v.reset(Op386ANDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -2268,9 +2180,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool { v.reset(Op386ANDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -2294,10 +2204,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + 
v.AddArg4(val, ptr, idx, mem) return true } return false @@ -2329,10 +2236,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ANDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -2355,10 +2259,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ANDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -2382,10 +2283,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -2415,9 +2313,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -2440,9 +2336,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2474,10 +2368,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ANDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -2500,10 +2391,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) 
v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ANDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -2527,10 +2415,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ANDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -2552,9 +2437,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -2602,8 +2485,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool { } v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -2626,9 +2508,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool { v.reset(Op386CMPBload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) @@ -2651,9 +2531,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool { v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -2769,8 +2647,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { break } v.reset(Op386TESTB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPBconst l:(ANDLconst [c] x) [0]) @@ -2802,8 +2679,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { } x := v_0 v.reset(Op386TESTB) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPBconst l:(MOVBload {sym} [off] ptr 
mem) [c]) @@ -2824,12 +2700,10 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -2856,8 +2730,7 @@ func rewriteValue386_Op386CMPBload(v *Value) bool { v.reset(Op386CMPBconstload) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2905,8 +2778,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool { } v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -2929,9 +2801,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool { v.reset(Op386CMPLload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) @@ -2954,9 +2824,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool { v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -3087,8 +2955,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { break } v.reset(Op386TESTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPLconst l:(ANDLconst [c] x) [0]) @@ -3120,8 +2987,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { } x := v_0 v.reset(Op386TESTL) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) @@ -3142,12 +3008,10 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) 
v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -3174,8 +3038,7 @@ func rewriteValue386_Op386CMPLload(v *Value) bool { v.reset(Op386CMPLconstload) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3223,8 +3086,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool { } v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -3247,9 +3109,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool { v.reset(Op386CMPWload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) @@ -3272,9 +3132,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool { v0 := b.NewValue0(l.Pos, Op386CMPWload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -3390,8 +3248,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { break } v.reset(Op386TESTW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWconst l:(ANDLconst [c] x) [0]) @@ -3423,8 +3280,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { } x := v_0 v.reset(Op386TESTW) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) @@ -3445,12 +3301,10 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -3477,8 +3331,7 @@ func rewriteValue386_Op386CMPWload(v *Value) bool { v.reset(Op386CMPWconstload) v.AuxInt = makeValAndOff(int64(int16(c)), off) 
v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3507,9 +3360,7 @@ func rewriteValue386_Op386DIVSD(v *Value) bool { v.reset(Op386DIVSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -3539,9 +3390,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool { v.reset(Op386DIVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -3564,9 +3413,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool { v.reset(Op386DIVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -3595,9 +3442,7 @@ func rewriteValue386_Op386DIVSS(v *Value) bool { v.reset(Op386DIVSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -3627,9 +3472,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool { v.reset(Op386DIVSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -3652,9 +3495,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool { v.reset(Op386DIVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -3702,8 +3543,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3748,8 +3588,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) 
return true } // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) @@ -3771,8 +3610,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) @@ -3794,8 +3632,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) @@ -3817,8 +3654,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3845,8 +3681,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3865,8 +3700,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3885,8 +3719,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3905,8 +3738,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3931,8 +3763,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3960,8 +3791,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) @@ -3982,8 +3812,7 @@ func 
rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c + 2*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) @@ -3999,8 +3828,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) @@ -4016,8 +3844,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -4039,8 +3866,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4066,8 +3892,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) @@ -4088,8 +3913,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c + 4*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) @@ -4105,8 +3929,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -4128,8 +3951,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4155,8 +3977,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) @@ -4177,8 
+3998,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c + 8*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -4200,8 +4020,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4226,12 +4045,10 @@ func rewriteValue386_Op386MOVBLSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBLSX (ANDLconst [c] x)) @@ -4270,9 +4087,8 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -4299,8 +4115,7 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool { v.reset(Op386MOVBLSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4325,12 +4140,10 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) @@ -4351,13 +4164,10 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, Op386MOVBloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVBLZX (ANDLconst [c] x)) @@ -4392,9 +4202,8 @@ func 
rewriteValue386_Op386MOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -4420,8 +4229,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -4443,8 +4251,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -4467,9 +4274,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off] {sym} (ADDL ptr idx) mem) @@ -4494,9 +4299,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -4536,9 +4339,7 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -4559,9 +4360,7 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -4588,9 +4387,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // 
match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem) @@ -4607,9 +4404,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -4631,9 +4426,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -4654,8 +4447,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -4678,9 +4470,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -4704,10 +4494,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem) @@ -4733,10 +4520,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -4763,9 +4547,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true 
} // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -4790,9 +4572,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem)) @@ -4818,9 +4598,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem)) @@ -4846,9 +4624,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) @@ -4878,9 +4654,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -4908,8 +4682,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -4931,8 +4704,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) @@ -4955,9 +4727,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) @@ -4974,9 +4744,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) @@ -5001,8 +4769,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) @@ -5027,8 +4794,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -5052,9 +4818,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -5072,9 +4836,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) @@ -5100,9 +4862,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } return 
false @@ -5129,10 +4889,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -5154,10 +4911,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -5189,10 +4943,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5225,10 +4976,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5262,10 +5010,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5299,10 +5044,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5340,10 +5082,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -5368,15 +5107,12 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -5397,8 +5133,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -5420,8 +5155,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -5444,9 +5178,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) @@ -5469,9 +5201,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off] {sym} (ADDL ptr idx) mem) @@ -5496,9 +5226,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5537,9 +5265,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5560,9 +5286,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5583,9 
+5307,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5611,9 +5333,7 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -5631,9 +5351,7 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -5663,9 +5381,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -5686,8 +5402,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -5710,9 +5425,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -5736,10 +5449,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] 
{sym2} ptr idx) val mem) @@ -5763,10 +5473,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem) @@ -5792,10 +5499,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -5819,9 +5523,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) @@ -5843,9 +5545,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) @@ -5867,9 +5567,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) @@ -5891,9 +5589,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -5926,9 +5622,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -5956,9 +5650,7 @@ 
func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386SUBLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -5991,9 +5683,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -6028,9 +5718,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -6065,9 +5753,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -6095,8 +5781,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ADDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) @@ -6122,8 +5807,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ANDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) @@ -6149,8 +5833,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) @@ -6176,8 +5859,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386XORLconstmodify) v.AuxInt = makeValAndOff(c, off) 
v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6205,8 +5887,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -6228,8 +5909,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) @@ -6252,9 +5932,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) @@ -6277,9 +5955,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) @@ -6296,9 +5972,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6321,9 +5995,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) @@ -6341,9 +6013,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { 
v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -6361,9 +6031,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6387,9 +6055,7 @@ func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -6407,9 +6073,7 @@ func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6435,10 +6099,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -6460,10 +6121,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -6485,10 +6143,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -6516,10 +6171,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = int64(int32(c + 
d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -6538,10 +6190,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6564,10 +6213,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6590,10 +6236,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6616,10 +6259,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6642,10 +6282,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) @@ -6679,10 +6316,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { 
v.reset(Op386ADDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6711,10 +6345,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) @@ -6748,10 +6379,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6787,10 +6415,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6826,10 +6451,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6858,9 +6480,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) @@ -6887,9 +6507,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) @@ -6916,9 +6534,7 @@ func 
rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) @@ -6945,9 +6561,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6995,8 +6609,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -7018,8 +6631,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -7042,9 +6654,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) @@ -7067,9 +6677,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem) @@ -7094,9 +6702,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -7122,9 
+6728,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7142,9 +6746,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7168,9 +6770,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { v.reset(Op386MOVSDloadidx8) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7188,9 +6788,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { v.reset(Op386MOVSDloadidx8) v.AuxInt = int64(int32(c + 8*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7220,9 +6818,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -7245,9 +6841,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -7271,10 +6865,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: 
(MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) @@ -7298,10 +6889,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem) @@ -7327,10 +6915,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -7358,10 +6943,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7380,10 +6962,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7409,10 +6988,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { v.reset(Op386MOVSDstoreidx8) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7431,10 +7007,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { v.reset(Op386MOVSDstoreidx8) v.AuxInt = int64(int32(c + 8*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7482,8 +7055,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSload) v.AuxInt = off1 + off2 v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -7505,8 +7077,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -7529,9 +7100,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) @@ -7554,9 +7123,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem) @@ -7581,9 +7148,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -7609,9 +7174,7 @@ func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7629,9 +7192,7 @@ func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7655,9 +7216,7 @@ func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool { v.reset(Op386MOVSSloadidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7675,9 +7234,7 @@ func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool { v.reset(Op386MOVSSloadidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7707,9 +7264,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -7732,9 +7287,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -7758,10 +7311,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) @@ -7785,10 +7335,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem) @@ -7814,10 +7361,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -7845,10 +7389,7 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { 
v.reset(Op386MOVSSstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7867,10 +7408,7 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7896,10 +7434,7 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { v.reset(Op386MOVSSstoreidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7918,10 +7453,7 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { v.reset(Op386MOVSSstoreidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7946,12 +7478,10 @@ func rewriteValue386_Op386MOVWLSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWLSX (ANDLconst [c] x)) @@ -7990,9 +7520,8 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -8019,8 +7548,7 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool { v.reset(Op386MOVWLSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8045,12 +7573,10 @@ func 
rewriteValue386_Op386MOVWLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) @@ -8071,13 +7597,10 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) @@ -8098,13 +7621,10 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, Op386MOVWloadidx2, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWLZX (ANDLconst [c] x)) @@ -8139,9 +7659,8 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -8167,8 +7686,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -8190,8 +7708,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -8214,9 +7731,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, 
sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) @@ -8239,9 +7754,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADDL ptr idx) mem) @@ -8266,9 +7779,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8307,9 +7818,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8330,9 +7839,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8353,9 +7860,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8381,9 +7886,7 @@ func rewriteValue386_Op386MOVWloadidx2(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -8401,9 +7904,7 @@ func rewriteValue386_Op386MOVWloadidx2(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = int64(int32(c + 2*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8428,9 +7929,7 @@ func 
rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) @@ -8447,9 +7946,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -8471,9 +7968,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -8494,8 +7989,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -8518,9 +8012,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -8544,10 +8036,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) @@ -8571,10 +8060,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, 
mem) return true } // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem) @@ -8600,10 +8086,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -8630,9 +8113,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) @@ -8662,9 +8143,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -8692,8 +8171,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -8715,8 +8193,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) @@ -8739,9 +8216,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) @@ -8764,9 +8239,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) @@ -8783,9 +8256,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) @@ -8810,8 +8281,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) @@ -8836,8 +8306,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -8860,9 +8329,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) @@ -8880,9 +8347,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -8900,9 +8365,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) @@ 
-8928,9 +8391,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } return false @@ -8955,9 +8416,7 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -8975,9 +8434,7 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) @@ -9003,12 +8460,10 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, Op386SHLLconst, i.Type) v0.AuxInt = 1 v0.AddArg(i) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -9034,10 +8489,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9059,10 +8511,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9084,10 +8533,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = 
int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9119,10 +8565,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -9160,10 +8603,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -9193,10 +8633,7 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -9215,10 +8652,7 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = int64(int32(c + 2*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) @@ -9244,13 +8678,10 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) @@ -9281,13 +8712,10 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) 
- v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, v0, w0, mem) return true } return false @@ -9331,9 +8759,7 @@ func rewriteValue386_Op386MULL(v *Value) bool { v.reset(Op386MULLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -9359,10 +8785,7 @@ func rewriteValue386_Op386MULL(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -9395,8 +8818,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9409,8 +8831,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9423,8 +8844,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9456,9 +8876,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLconst [3] x) @@ -9469,8 +8887,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [5] x) @@ -9481,8 +8898,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [7] x) @@ -9493,11 +8909,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - 
v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [9] x) @@ -9508,8 +8922,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [11] x) @@ -9520,11 +8933,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [13] x) @@ -9535,11 +8946,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [19] x) @@ -9550,11 +8959,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [21] x) @@ -9565,11 +8972,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [25] x) @@ -9580,11 +8985,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [27] x) @@ -9596,13 +8999,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, Op386LEAL2, 
v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [37] x) @@ -9613,11 +9013,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [41] x) @@ -9628,11 +9026,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [45] x) @@ -9644,13 +9040,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [73] x) @@ -9661,11 +9054,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [81] x) @@ -9677,13 +9068,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [c] x) @@ -9699,8 +9087,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return 
true } // match: (MULLconst [c] x) @@ -9716,8 +9103,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9733,8 +9119,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 2) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9750,8 +9135,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 4) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9767,8 +9151,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 8) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9783,8 +9166,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v.reset(Op386SHLLconst) v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9800,8 +9182,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v.reset(Op386SHLLconst) v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9817,8 +9198,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v.reset(Op386SHLLconst) v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9861,9 +9241,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool { v.reset(Op386MULLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // 
match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -9886,9 +9264,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool { v.reset(Op386MULLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -9912,10 +9288,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -9947,10 +9320,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (MULLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -9973,10 +9343,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (MULLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -10000,10 +9367,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -10033,9 +9397,7 @@ func rewriteValue386_Op386MULSD(v *Value) bool { v.reset(Op386MULSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -10067,9 +9429,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool { v.reset(Op386MULSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + 
v.AddArg3(val, base, mem) return true } // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -10092,9 +9452,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool { v.reset(Op386MULSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -10124,9 +9482,7 @@ func rewriteValue386_Op386MULSS(v *Value) bool { v.reset(Op386MULSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -10158,9 +9514,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool { v.reset(Op386MULSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -10183,9 +9537,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool { v.reset(Op386MULSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -10335,9 +9687,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.reset(Op386ORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -10363,10 +9713,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -10378,9 +9725,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORL x0:(MOVBload [i0] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) @@ -10414,12 +9759,10 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16) - 
v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -10479,12 +9822,10 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -10528,13 +9869,10 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -10606,13 +9944,10 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -10634,9 +9969,7 @@ func rewriteValue386_Op386ORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORLconst [c] _) @@ -10688,8 +10021,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool { v.reset(Op386ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -10711,8 +10043,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool { v.reset(Op386ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -10742,9 +10073,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = 
ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -10766,9 +10095,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -10791,9 +10118,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -10823,9 +10148,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool { v.reset(Op386ORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -10848,9 +10171,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool { v.reset(Op386ORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -10874,10 +10195,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -10909,10 +10227,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: 
(ORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -10935,10 +10250,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -10962,10 +10274,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -10995,9 +10304,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -11020,9 +10327,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -11054,10 +10359,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -11080,10 +10382,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -11107,10 +10406,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { 
v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -11132,9 +10428,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -11162,9 +10456,7 @@ func rewriteValue386_Op386ROLBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11192,9 +10484,7 @@ func rewriteValue386_Op386ROLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11222,9 +10512,7 @@ func rewriteValue386_Op386ROLWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11256,9 +10544,7 @@ func rewriteValue386_Op386SARBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARBconst [c] (MOVLconst [d])) @@ -11300,8 +10586,7 @@ func rewriteValue386_Op386SARL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -11315,9 +10600,7 @@ func rewriteValue386_Op386SARLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARLconst [c] (MOVLconst [d])) @@ -11361,9 +10644,7 @@ func rewriteValue386_Op386SARWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARWconst [c] (MOVLconst [d])) @@ -11395,8 +10676,7 @@ func rewriteValue386_Op386SBBL(v *Value) bool { f := v_2 
v.reset(Op386SBBLconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(f) + v.AddArg2(x, f) return true } return false @@ -12130,8 +11410,7 @@ func rewriteValue386_Op386SHLL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12145,9 +11424,7 @@ func rewriteValue386_Op386SHLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12198,9 +11475,7 @@ func rewriteValue386_Op386SHRBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12230,8 +11505,7 @@ func rewriteValue386_Op386SHRL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12245,9 +11519,7 @@ func rewriteValue386_Op386SHRLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12298,9 +11570,7 @@ func rewriteValue386_Op386SHRWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12356,9 +11626,7 @@ func rewriteValue386_Op386SUBL(v *Value) bool { v.reset(Op386SUBLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) @@ -12381,10 +11649,7 @@ func rewriteValue386_Op386SUBL(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } // match: (SUBL x x) @@ -12429,9 +11694,7 @@ func rewriteValue386_Op386SUBLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBLconst [c] x) @@ -12470,9 +11733,7 @@ func 
rewriteValue386_Op386SUBLload(v *Value) bool { v.reset(Op386SUBLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -12495,9 +11756,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool { v.reset(Op386SUBLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -12521,10 +11780,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -12556,10 +11812,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (SUBLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -12582,10 +11835,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (SUBLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -12609,10 +11859,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -12642,9 +11889,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool { v.reset(Op386SUBLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + 
v.AddArg3(base, val, mem) return true } // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -12667,9 +11912,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool { v.reset(Op386SUBLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -12701,10 +11944,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (SUBLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -12727,10 +11967,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (SUBLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -12754,10 +11991,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -12779,9 +12013,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = makeValAndOff(-c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -12810,9 +12042,7 @@ func rewriteValue386_Op386SUBSD(v *Value) bool { v.reset(Op386SUBSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -12842,9 +12072,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool { v.reset(Op386SUBSDload) v.AuxInt = off1 + off2 
v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -12867,9 +12095,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool { v.reset(Op386SUBSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -12898,9 +12124,7 @@ func rewriteValue386_Op386SUBSS(v *Value) bool { v.reset(Op386SUBSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -12930,9 +12154,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool { v.reset(Op386SUBSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -12955,9 +12177,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool { v.reset(Op386SUBSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -13075,9 +12295,7 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.reset(Op386XORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -13103,10 +12321,7 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -13149,9 +12364,7 @@ func rewriteValue386_Op386XORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORLconst [c] (MOVLconst [d])) @@ -13191,8 +12404,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool { v.reset(Op386XORLconstmodify) 
v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -13214,8 +12426,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool { v.reset(Op386XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -13245,9 +12456,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (XORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -13269,9 +12478,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (XORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -13294,9 +12501,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -13326,9 +12531,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool { v.reset(Op386XORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -13351,9 +12554,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool { v.reset(Op386XORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: 
(XORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -13377,10 +12578,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -13412,10 +12610,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (XORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -13438,10 +12633,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (XORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -13465,10 +12657,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -13498,9 +12687,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -13523,9 +12710,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -13557,10 +12742,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - 
v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (XORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -13583,10 +12765,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (XORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -13610,10 +12789,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (XORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -13635,9 +12811,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -13680,10 +12854,9 @@ func rewriteValue386_OpDiv8(v *Value) bool { v.reset(Op386DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -13700,10 +12873,9 @@ func rewriteValue386_OpDiv8u(v *Value) bool { v.reset(Op386DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -13718,8 +12890,7 @@ func rewriteValue386_OpEq16(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13735,8 +12906,7 @@ func rewriteValue386_OpEq32(v *Value) bool 
{ y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13752,8 +12922,7 @@ func rewriteValue386_OpEq32F(v *Value) bool { y := v_1 v.reset(Op386SETEQF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13769,8 +12938,7 @@ func rewriteValue386_OpEq64F(v *Value) bool { y := v_1 v.reset(Op386SETEQF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13786,8 +12954,7 @@ func rewriteValue386_OpEq8(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13803,8 +12970,7 @@ func rewriteValue386_OpEqB(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13820,8 +12986,7 @@ func rewriteValue386_OpEqPtr(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13837,8 +13002,7 @@ func rewriteValue386_OpGeq32F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13854,8 +13018,7 @@ func rewriteValue386_OpGeq64F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13871,8 +13034,7 @@ func rewriteValue386_OpGreater32F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return 
true } @@ -13888,8 +13050,7 @@ func rewriteValue386_OpGreater64F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13905,8 +13066,7 @@ func rewriteValue386_OpIsInBounds(v *Value) bool { len := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -13920,8 +13080,7 @@ func rewriteValue386_OpIsNonNil(v *Value) bool { p := v_0 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags) - v0.AddArg(p) - v0.AddArg(p) + v0.AddArg2(p, p) v.AddArg(v0) return true } @@ -13937,8 +13096,7 @@ func rewriteValue386_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -13954,8 +13112,7 @@ func rewriteValue386_OpLeq16(v *Value) bool { y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13971,8 +13128,7 @@ func rewriteValue386_OpLeq16U(v *Value) bool { y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13988,8 +13144,7 @@ func rewriteValue386_OpLeq32(v *Value) bool { y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14005,8 +13160,7 @@ func rewriteValue386_OpLeq32F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14022,8 +13176,7 @@ func rewriteValue386_OpLeq32U(v *Value) bool { y := v_1 v.reset(Op386SETBE) v0 := 
b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14039,8 +13192,7 @@ func rewriteValue386_OpLeq64F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14056,8 +13208,7 @@ func rewriteValue386_OpLeq8(v *Value) bool { y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14073,8 +13224,7 @@ func rewriteValue386_OpLeq8U(v *Value) bool { y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14090,8 +13240,7 @@ func rewriteValue386_OpLess16(v *Value) bool { y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14107,8 +13256,7 @@ func rewriteValue386_OpLess16U(v *Value) bool { y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14124,8 +13272,7 @@ func rewriteValue386_OpLess32(v *Value) bool { y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14141,8 +13288,7 @@ func rewriteValue386_OpLess32F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14158,8 +13304,7 @@ func rewriteValue386_OpLess32U(v *Value) bool { y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14175,8 +13320,7 @@ func 
rewriteValue386_OpLess64F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14192,8 +13336,7 @@ func rewriteValue386_OpLess8(v *Value) bool { y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14209,8 +13352,7 @@ func rewriteValue386_OpLess8U(v *Value) bool { y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14229,8 +13371,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVLload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14244,8 +13385,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14259,8 +13399,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14274,8 +13413,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVSSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14289,8 +13427,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVSDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -14324,15 +13461,13 @@ func rewriteValue386_OpLsh16x16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) 
return true } // match: (Lsh16x16 x y) @@ -14347,8 +13482,7 @@ func rewriteValue386_OpLsh16x16(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14369,15 +13503,13 @@ func rewriteValue386_OpLsh16x32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x32 x y) @@ -14392,8 +13524,7 @@ func rewriteValue386_OpLsh16x32(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14451,15 +13582,13 @@ func rewriteValue386_OpLsh16x8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x8 x y) @@ -14474,8 +13603,7 @@ func rewriteValue386_OpLsh16x8(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14496,15 +13624,13 @@ func rewriteValue386_OpLsh32x16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x16 x y) @@ -14519,8 +13645,7 @@ func rewriteValue386_OpLsh32x16(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } 
return false @@ -14541,15 +13666,13 @@ func rewriteValue386_OpLsh32x32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x32 x y) @@ -14564,8 +13687,7 @@ func rewriteValue386_OpLsh32x32(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14623,15 +13745,13 @@ func rewriteValue386_OpLsh32x8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x8 x y) @@ -14646,8 +13766,7 @@ func rewriteValue386_OpLsh32x8(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14668,15 +13787,13 @@ func rewriteValue386_OpLsh8x16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x16 x y) @@ -14691,8 +13808,7 @@ func rewriteValue386_OpLsh8x16(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14713,15 +13829,13 @@ func rewriteValue386_OpLsh8x32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, 
y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x32 x y) @@ -14736,8 +13850,7 @@ func rewriteValue386_OpLsh8x32(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14795,15 +13908,13 @@ func rewriteValue386_OpLsh8x8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x8 x y) @@ -14818,8 +13929,7 @@ func rewriteValue386_OpLsh8x8(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14837,10 +13947,9 @@ func rewriteValue386_OpMod8(v *Value) bool { v.reset(Op386MODW) v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14857,10 +13966,9 @@ func rewriteValue386_OpMod8u(v *Value) bool { v.reset(Op386MODWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14878,9 +13986,7 @@ func rewriteValue386_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -14893,12 +13999,9 @@ func rewriteValue386_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(Op386MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - 
v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -14911,12 +14014,9 @@ func rewriteValue386_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(Op386MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -14929,12 +14029,9 @@ func rewriteValue386_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(Op386MOVLstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [3] dst src mem) @@ -14948,20 +14045,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -14975,20 +14066,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move 
[6] dst src mem) @@ -15002,20 +14087,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -15029,20 +14108,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVLstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] dst src mem) @@ -15056,20 +14129,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVLstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -15088,19 +14155,14 @@ func rewriteValue386_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type) v0.AuxInt = s % 4 v0.AddArg(dst) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, Op386ADDLconst, src.Type) v1.AuxInt = s % 4 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -15116,9 +14178,7 @@ func rewriteValue386_OpMove(v *Value) bool { } v.reset(Op386DUFFCOPY) v.AuxInt = 10 * (128 - s/4) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -15133,12 +14193,9 @@ func rewriteValue386_OpMove(v *Value) bool { break } v.reset(Op386REPMOVSL) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v0.AuxInt = s / 4 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -15157,10 +14214,9 @@ func rewriteValue386_OpNeg32F(v *Value) bool { break } v.reset(Op386PXOR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32) v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1))) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Neg32F x) @@ -15191,10 +14247,9 @@ func rewriteValue386_OpNeg64F(v *Value) bool { break } v.reset(Op386PXOR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64) v0.AuxInt = auxFrom64F(math.Copysign(0, -1)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Neg64F x) @@ -15222,8 +14277,7 @@ func rewriteValue386_OpNeq16(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15239,8 +14293,7 @@ func rewriteValue386_OpNeq32(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15256,8 
+14309,7 @@ func rewriteValue386_OpNeq32F(v *Value) bool { y := v_1 v.reset(Op386SETNEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15273,8 +14325,7 @@ func rewriteValue386_OpNeq64F(v *Value) bool { y := v_1 v.reset(Op386SETNEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15290,8 +14341,7 @@ func rewriteValue386_OpNeq8(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15307,8 +14357,7 @@ func rewriteValue386_OpNeqB(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15324,8 +14373,7 @@ func rewriteValue386_OpNeqPtr(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15359,9 +14407,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool { } v.reset(Op386LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -15377,9 +14423,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool { } v.reset(Op386LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -15395,9 +14439,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool { } v.reset(Op386LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -15421,10 +14463,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } v.reset(Op386LoweredPanicExtendA) v.AuxInt = kind - v.AddArg(hi) 
- v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -15441,10 +14480,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } v.reset(Op386LoweredPanicExtendB) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -15461,10 +14497,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } v.reset(Op386LoweredPanicExtendC) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } return false @@ -15539,15 +14572,13 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux16 x y) @@ -15562,8 +14593,7 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool { } v.reset(Op386SHRW) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15584,15 +14614,13 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux32 x y) @@ -15607,8 +14635,7 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool { } v.reset(Op386SHRW) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15666,15 +14693,13 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) - 
v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux8 x y) @@ -15689,8 +14714,7 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool { } v.reset(Op386SHRW) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15711,9 +14735,7 @@ func rewriteValue386_OpRsh16x16(v *Value) bool { } v.reset(Op386SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) @@ -15721,8 +14743,8 @@ func rewriteValue386_OpRsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x16 x y) @@ -15735,8 +14757,7 @@ func rewriteValue386_OpRsh16x16(v *Value) bool { break } v.reset(Op386SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15757,9 +14778,7 @@ func rewriteValue386_OpRsh16x32(v *Value) bool { } v.reset(Op386SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) @@ -15767,8 +14786,8 @@ func rewriteValue386_OpRsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x32 x y) @@ -15781,8 +14800,7 @@ func rewriteValue386_OpRsh16x32(v *Value) bool { break } v.reset(Op386SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15842,9 +14860,7 @@ func rewriteValue386_OpRsh16x8(v *Value) bool { } 
v.reset(Op386SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) @@ -15852,8 +14868,8 @@ func rewriteValue386_OpRsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x8 x y) @@ -15866,8 +14882,7 @@ func rewriteValue386_OpRsh16x8(v *Value) bool { break } v.reset(Op386SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15888,15 +14903,13 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux16 x y) @@ -15911,8 +14924,7 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool { } v.reset(Op386SHRL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15933,15 +14945,13 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux32 x y) @@ -15956,8 +14966,7 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool { } v.reset(Op386SHRL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16015,15 +15024,13 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) - 
v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux8 x y) @@ -16038,8 +15045,7 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool { } v.reset(Op386SHRL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16060,9 +15066,7 @@ func rewriteValue386_OpRsh32x16(v *Value) bool { } v.reset(Op386SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) @@ -16070,8 +15074,8 @@ func rewriteValue386_OpRsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 x y) @@ -16084,8 +15088,7 @@ func rewriteValue386_OpRsh32x16(v *Value) bool { break } v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16106,9 +15109,7 @@ func rewriteValue386_OpRsh32x32(v *Value) bool { } v.reset(Op386SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) @@ -16116,8 +15117,8 @@ func rewriteValue386_OpRsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 x y) @@ -16130,8 +15131,7 @@ func rewriteValue386_OpRsh32x32(v *Value) bool { break } v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16191,9 +15191,7 @@ func rewriteValue386_OpRsh32x8(v *Value) bool { } 
v.reset(Op386SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) @@ -16201,8 +15199,8 @@ func rewriteValue386_OpRsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 x y) @@ -16215,8 +15213,7 @@ func rewriteValue386_OpRsh32x8(v *Value) bool { break } v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16237,15 +15234,13 @@ func rewriteValue386_OpRsh8Ux16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux16 x y) @@ -16260,8 +15255,7 @@ func rewriteValue386_OpRsh8Ux16(v *Value) bool { } v.reset(Op386SHRB) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16282,15 +15276,13 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux32 x y) @@ -16305,8 +15297,7 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool { } v.reset(Op386SHRB) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16364,15 +15355,13 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) - v0.AddArg(x) - 
v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux8 x y) @@ -16387,8 +15376,7 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool { } v.reset(Op386SHRB) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16409,9 +15397,7 @@ func rewriteValue386_OpRsh8x16(v *Value) bool { } v.reset(Op386SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) @@ -16419,8 +15405,8 @@ func rewriteValue386_OpRsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x16 x y) @@ -16433,8 +15419,7 @@ func rewriteValue386_OpRsh8x16(v *Value) bool { break } v.reset(Op386SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16455,9 +15440,7 @@ func rewriteValue386_OpRsh8x32(v *Value) bool { } v.reset(Op386SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) @@ -16465,8 +15448,8 @@ func rewriteValue386_OpRsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x32 x y) @@ -16479,8 +15462,7 @@ func rewriteValue386_OpRsh8x32(v *Value) bool { break } v.reset(Op386SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16540,9 +15522,7 @@ func rewriteValue386_OpRsh8x8(v *Value) bool { } v.reset(Op386SARB) v.Type = t - 
v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) @@ -16550,8 +15530,8 @@ func rewriteValue386_OpRsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x8 x y) @@ -16564,8 +15544,7 @@ func rewriteValue386_OpRsh8x8(v *Value) bool { break } v.reset(Op386SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16585,8 +15564,7 @@ func rewriteValue386_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16607,8 +15585,7 @@ func rewriteValue386_OpSelect1(v *Value) bool { v.reset(Op386SETO) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -16659,9 +15636,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVSDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16676,9 +15651,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVSSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16693,9 +15666,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVLstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16710,9 +15681,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVWstore) - v.AddArg(ptr) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16727,9 +15696,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -16747,9 +15714,7 @@ func rewriteValue386_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) @@ -16762,8 +15727,7 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -16776,8 +15740,7 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVWstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) @@ -16790,8 +15753,7 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -16804,12 +15766,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) @@ -16822,12 +15782,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -16840,12 
+15798,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -16858,12 +15814,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 3) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -16881,12 +15835,10 @@ func rewriteValue386_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32) v0.AuxInt = s % 4 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [8] destptr mem) @@ -16899,12 +15851,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [12] destptr mem) @@ -16917,16 +15867,13 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 4) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) 
+ v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [16] destptr mem) @@ -16939,20 +15886,16 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 12) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 8) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v1.AuxInt = makeValAndOff(0, 4) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -16967,11 +15910,9 @@ func rewriteValue386_OpZero(v *Value) bool { } v.reset(Op386DUFFZERO) v.AuxInt = 1 * (128 - s/4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [s] destptr mem) @@ -16985,14 +15926,11 @@ func rewriteValue386_OpZero(v *Value) bool { break } v.reset(Op386REPSTOSL) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v0.AuxInt = s / 4 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(destptr, v0, v1, mem) return true } return false @@ -17024,8 +15962,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386EQ) - b.AddControl(cmp) + b.resetWithControl(Block386EQ, cmp) return true } // match: (EQ (FlagEQ) yes no) @@ -17068,8 +16005,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LE) - b.AddControl(cmp) + b.resetWithControl(Block386LE, cmp) return true } // 
match: (GE (FlagEQ) yes no) @@ -17110,8 +16046,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LT) - b.AddControl(cmp) + b.resetWithControl(Block386LT, cmp) return true } // match: (GT (FlagEQ) yes no) @@ -17153,8 +16088,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETL { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LT) - b.AddControl(cmp) + b.resetWithControl(Block386LT, cmp) return true } // match: (If (SETLE cmp) yes no) @@ -17162,8 +16096,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETLE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LE) - b.AddControl(cmp) + b.resetWithControl(Block386LE, cmp) return true } // match: (If (SETG cmp) yes no) @@ -17171,8 +16104,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETG { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GT) - b.AddControl(cmp) + b.resetWithControl(Block386GT, cmp) return true } // match: (If (SETGE cmp) yes no) @@ -17180,8 +16112,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETGE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GE) - b.AddControl(cmp) + b.resetWithControl(Block386GE, cmp) return true } // match: (If (SETEQ cmp) yes no) @@ -17189,8 +16120,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETEQ { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386EQ) - b.AddControl(cmp) + b.resetWithControl(Block386EQ, cmp) return true } // match: (If (SETNE cmp) yes no) @@ -17198,8 +16128,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETNE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386NE) - b.AddControl(cmp) + b.resetWithControl(Block386NE, cmp) return true } // match: (If (SETB cmp) yes no) @@ -17207,8 +16136,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETB { v_0 
:= b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULT) - b.AddControl(cmp) + b.resetWithControl(Block386ULT, cmp) return true } // match: (If (SETBE cmp) yes no) @@ -17216,8 +16144,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETBE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULE) - b.AddControl(cmp) + b.resetWithControl(Block386ULE, cmp) return true } // match: (If (SETA cmp) yes no) @@ -17225,8 +16152,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETA { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (If (SETAE cmp) yes no) @@ -17234,8 +16160,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETAE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (If (SETO cmp) yes no) @@ -17243,8 +16168,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETO { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386OS) - b.AddControl(cmp) + b.resetWithControl(Block386OS, cmp) return true } // match: (If (SETGF cmp) yes no) @@ -17252,8 +16176,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETGF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (If (SETGEF cmp) yes no) @@ -17261,8 +16184,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETGEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (If (SETEQF cmp) yes no) @@ -17270,8 +16192,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETEQF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386EQF) - b.AddControl(cmp) + b.resetWithControl(Block386EQF, cmp) 
return true } // match: (If (SETNEF cmp) yes no) @@ -17279,19 +16200,16 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETNEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386NEF) - b.AddControl(cmp) + b.resetWithControl(Block386NEF, cmp) return true } // match: (If cond yes no) // result: (NE (TESTB cond cond) yes no) for { cond := b.Controls[0] - b.Reset(Block386NE) v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags) - v0.AddArg(cond) - v0.AddArg(cond) - b.AddControl(v0) + v0.AddArg2(cond, cond) + b.resetWithControl(Block386NE, v0) return true } case Block386LE: @@ -17300,8 +16218,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GE) - b.AddControl(cmp) + b.resetWithControl(Block386GE, cmp) return true } // match: (LE (FlagEQ) yes no) @@ -17342,8 +16259,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GT) - b.AddControl(cmp) + b.resetWithControl(Block386GT, cmp) return true } // match: (LT (FlagEQ) yes no) @@ -17394,8 +16310,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] { break } - b.Reset(Block386LT) - b.AddControl(cmp) + b.resetWithControl(Block386LT, cmp) return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -17412,8 +16327,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386LE) - b.AddControl(cmp) + b.resetWithControl(Block386LE, cmp) return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -17430,8 +16344,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] { break } - b.Reset(Block386GT) - b.AddControl(cmp) + b.resetWithControl(Block386GT, cmp) return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -17448,8 +16361,7 @@ func 
rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386GE) - b.AddControl(cmp) + b.resetWithControl(Block386GE, cmp) return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -17466,8 +16378,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] { break } - b.Reset(Block386EQ) - b.AddControl(cmp) + b.resetWithControl(Block386EQ, cmp) return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -17484,8 +16395,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386NE) - b.AddControl(cmp) + b.resetWithControl(Block386NE, cmp) return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -17502,8 +16412,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] { break } - b.Reset(Block386ULT) - b.AddControl(cmp) + b.resetWithControl(Block386ULT, cmp) return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -17520,8 +16429,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386ULE) - b.AddControl(cmp) + b.resetWithControl(Block386ULE, cmp) return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -17538,8 +16446,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -17556,8 +16463,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) @@ -17574,8 +16480,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] { break } - 
b.Reset(Block386OS) - b.AddControl(cmp) + b.resetWithControl(Block386OS, cmp) return true } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) @@ -17592,8 +16497,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) @@ -17610,8 +16514,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -17628,8 +16531,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386EQF) - b.AddControl(cmp) + b.resetWithControl(Block386EQF, cmp) return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -17646,8 +16548,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386NEF) - b.AddControl(cmp) + b.resetWithControl(Block386NEF, cmp) return true } // match: (NE (InvertFlags cmp) yes no) @@ -17655,8 +16556,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386NE) - b.AddControl(cmp) + b.resetWithControl(Block386NE, cmp) return true } // match: (NE (FlagEQ) yes no) @@ -17696,8 +16596,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULE) - b.AddControl(cmp) + b.resetWithControl(Block386ULE, cmp) return true } // match: (UGE (FlagEQ) yes no) @@ -17738,8 +16637,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULT) - b.AddControl(cmp) + b.resetWithControl(Block386ULT, cmp) 
return true } // match: (UGT (FlagEQ) yes no) @@ -17781,8 +16679,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (ULE (FlagEQ) yes no) @@ -17823,8 +16720,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (ULT (FlagEQ) yes no) diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go index cce1b2d05a..f82eae99ab 100644 --- a/src/cmd/compile/internal/ssa/rewrite386splitload.go +++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go @@ -37,8 +37,7 @@ func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -61,10 +60,8 @@ func rewriteValue386splitload_Op386CMPBload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -85,8 +82,7 @@ func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -109,10 +105,8 @@ func rewriteValue386splitload_Op386CMPLload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -133,8 +127,7 @@ func 
rewriteValue386splitload_Op386CMPWconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -157,10 +150,8 @@ func rewriteValue386splitload_Op386CMPWload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 665b20c42d..c37bae2c22 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -780,6 +780,9 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt64to64F: v.Op = OpAMD64CVTSQ2SD return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1262,8 +1265,7 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool { } v.reset(OpAMD64ADCQconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(carry) + v.AddArg2(x, carry) return true } break @@ -1277,8 +1279,7 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool { break } v.reset(OpAMD64ADDQcarry) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1404,8 +1405,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAL8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1420,8 +1420,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAL4) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1436,8 +1435,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1455,8 +1453,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { continue } v.reset(OpAMD64LEAL2) 
- v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1478,8 +1475,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1_1 v.reset(OpAMD64LEAL2) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1497,8 +1493,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { y := v_1 v.reset(OpAMD64LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1521,8 +1516,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1537,8 +1531,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64SUBL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1563,9 +1556,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.reset(OpAMD64ADDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1585,8 +1576,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (SHLLconst [1] x)) @@ -1599,8 +1589,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (ADDLconst [c] (LEAL [d] {s} x)) @@ -1641,8 +1630,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) @@ -1663,8 +1651,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) @@ -1685,8 +1672,7 @@ func 
rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) @@ -1707,8 +1693,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] x) @@ -1720,9 +1705,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDLconst [c] (MOVLconst [d])) @@ -1787,8 +1770,7 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool { v.reset(OpAMD64ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -1810,8 +1792,7 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool { v.reset(OpAMD64ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -1841,9 +1822,7 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { v.reset(OpAMD64ADDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -1866,9 +1845,7 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { v.reset(OpAMD64ADDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -1881,16 +1858,14 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != 
sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -1918,9 +1893,7 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -1943,9 +1916,7 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2027,8 +1998,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2043,8 +2013,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2059,8 +2028,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2078,8 +2046,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { continue } v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2101,8 +2068,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1_1 v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -2120,8 +2086,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { y := v_1 v.reset(OpAMD64LEAQ1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2144,8 +2109,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.reset(OpAMD64LEAQ1) 
v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2160,8 +2124,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64SUBQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2186,9 +2149,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.reset(OpAMD64ADDQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2233,8 +2194,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAQ1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (SHLQconst [1] x)) @@ -2247,8 +2207,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAQ1) v.AuxInt = c - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (ADDQconst [c] (LEAQ [d] {s} x)) @@ -2289,8 +2248,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) @@ -2311,8 +2269,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) @@ -2333,8 +2290,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) @@ -2355,8 +2311,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [0] x) @@ -2366,9 +2321,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDQconst [c] (MOVQconst [d])) @@ -2437,8 +2390,7 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool { v.reset(OpAMD64ADDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -2460,8 +2412,7 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool { v.reset(OpAMD64ADDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2491,9 +2442,7 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { v.reset(OpAMD64ADDQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -2516,9 +2465,7 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { v.reset(OpAMD64ADDQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -2531,16 +2478,14 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -2568,9 +2513,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) 
val mem) @@ -2593,9 +2536,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2623,9 +2564,7 @@ func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool { v.reset(OpAMD64ADDSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2657,9 +2596,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { v.reset(OpAMD64ADDSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -2682,9 +2619,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { v.reset(OpAMD64ADDSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) @@ -2697,16 +2632,14 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDSD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -2734,9 +2667,7 @@ func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool { v.reset(OpAMD64ADDSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2768,9 +2699,7 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { v.reset(OpAMD64ADDSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSSload 
[off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -2793,9 +2722,7 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { v.reset(OpAMD64ADDSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) @@ -2808,16 +2735,14 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDSS) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -2843,8 +2768,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { } x := v_1 v.reset(OpAMD64BTRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2892,9 +2816,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) @@ -2917,9 +2839,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { v.reset(OpAMD64ANDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -3013,9 +2933,7 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDLconst [c] (MOVLconst [d])) @@ -3053,8 +2971,7 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool { v.reset(OpAMD64ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3076,8 +2993,7 @@ func 
rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool { v.reset(OpAMD64ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3107,9 +3023,7 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { v.reset(OpAMD64ANDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -3132,9 +3046,7 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { v.reset(OpAMD64ANDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -3147,16 +3059,14 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ANDL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -3184,9 +3094,7 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3209,9 +3117,7 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -3237,8 +3143,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { } x := v_1 v.reset(OpAMD64BTRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return 
true } break @@ -3290,9 +3195,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) @@ -3315,9 +3218,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { v.reset(OpAMD64ANDQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -3418,9 +3319,7 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDQconst [c] (MOVQconst [d])) @@ -3458,8 +3357,7 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool { v.reset(OpAMD64ANDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3481,8 +3379,7 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool { v.reset(OpAMD64ANDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3512,9 +3409,7 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { v.reset(OpAMD64ANDQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -3537,9 +3432,7 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { v.reset(OpAMD64ANDQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -3552,16 +3445,14 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != 
off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ANDQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -3589,9 +3480,7 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3614,9 +3503,7 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -3735,8 +3622,7 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool { v.reset(OpAMD64BTCLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3758,8 +3644,7 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool { v.reset(OpAMD64BTCLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3787,9 +3672,7 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool { v.reset(OpAMD64BTCLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3812,9 +3695,7 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool { v.reset(OpAMD64BTCLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return 
false @@ -3884,8 +3765,7 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool { v.reset(OpAMD64BTCQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3907,8 +3787,7 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool { v.reset(OpAMD64BTCQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3936,9 +3815,7 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool { v.reset(OpAMD64BTCQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3961,9 +3838,7 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool { v.reset(OpAMD64BTCQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4019,8 +3894,7 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { y := s.Args[1] x := s.Args[0] v.reset(OpAMD64BTQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (BTLconst [c] (SHRLconst [d] x)) @@ -4072,8 +3946,7 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { y := s.Args[1] x := s.Args[0] v.reset(OpAMD64BTL) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -4129,8 +4002,7 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool { y := s.Args[1] x := s.Args[0] v.reset(OpAMD64BTQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -4226,8 +4098,7 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool { v.reset(OpAMD64BTRLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + 
v.AddArg2(base, mem) return true } // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4249,8 +4120,7 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool { v.reset(OpAMD64BTRLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4278,9 +4148,7 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool { v.reset(OpAMD64BTRLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4303,9 +4171,7 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool { v.reset(OpAMD64BTRLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4401,8 +4267,7 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool { v.reset(OpAMD64BTRQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4424,8 +4289,7 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool { v.reset(OpAMD64BTRQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4453,9 +4317,7 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool { v.reset(OpAMD64BTRQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4478,9 +4340,7 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool { v.reset(OpAMD64BTRQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - 
v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4576,8 +4436,7 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool { v.reset(OpAMD64BTSLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4599,8 +4458,7 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool { v.reset(OpAMD64BTSLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4628,9 +4486,7 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool { v.reset(OpAMD64BTSLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4653,9 +4509,7 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool { v.reset(OpAMD64BTSLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4751,8 +4605,7 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool { v.reset(OpAMD64BTSQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4774,8 +4627,7 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool { v.reset(OpAMD64BTSQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4803,9 +4655,7 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool { v.reset(OpAMD64BTSQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4828,9 +4678,7 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool { v.reset(OpAMD64BTSQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4849,9 +4697,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLLS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLCC _ x (FlagEQ)) @@ -4861,9 +4707,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCC _ x (FlagGT_UGT)) @@ -4873,9 +4717,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCC y _ (FlagGT_ULT)) @@ -4885,9 +4727,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCC y _ (FlagLT_ULT)) @@ -4897,9 +4737,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCC _ x (FlagLT_UGT)) @@ -4909,9 +4747,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -4930,9 +4766,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLHI) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: 
(CMOVLCS y _ (FlagEQ)) @@ -4942,9 +4776,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCS y _ (FlagGT_UGT)) @@ -4954,9 +4786,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCS _ x (FlagGT_ULT)) @@ -4966,9 +4796,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCS _ x (FlagLT_ULT)) @@ -4978,9 +4806,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCS y _ (FlagLT_UGT)) @@ -4990,9 +4816,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5011,9 +4835,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLEQ _ x (FlagEQ)) @@ -5023,9 +4845,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLEQ y _ (FlagGT_UGT)) @@ -5035,9 +4855,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLEQ y _ (FlagGT_ULT)) @@ -5047,9 +4865,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - 
v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLEQ y _ (FlagLT_ULT)) @@ -5059,9 +4875,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLEQ y _ (FlagLT_UGT)) @@ -5071,9 +4885,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5092,9 +4904,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLGE _ x (FlagEQ)) @@ -5104,9 +4914,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGE _ x (FlagGT_UGT)) @@ -5116,9 +4924,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGE _ x (FlagGT_ULT)) @@ -5128,9 +4934,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGE y _ (FlagLT_ULT)) @@ -5140,9 +4944,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLGE y _ (FlagLT_UGT)) @@ -5152,9 +4954,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5173,9 +4973,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { } cond := 
v_2.Args[0] v.reset(OpAMD64CMOVLLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLGT y _ (FlagEQ)) @@ -5185,9 +4983,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLGT _ x (FlagGT_UGT)) @@ -5197,9 +4993,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGT _ x (FlagGT_ULT)) @@ -5209,9 +5003,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGT y _ (FlagLT_ULT)) @@ -5221,9 +5013,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLGT y _ (FlagLT_UGT)) @@ -5233,9 +5023,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5254,9 +5042,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLCS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLHI y _ (FlagEQ)) @@ -5266,9 +5052,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLHI _ x (FlagGT_UGT)) @@ -5278,9 +5062,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLHI y _ (FlagGT_ULT)) @@ 
-5290,9 +5072,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLHI y _ (FlagLT_ULT)) @@ -5302,9 +5082,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLHI _ x (FlagLT_UGT)) @@ -5314,9 +5092,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5335,9 +5111,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLLE _ x (FlagEQ)) @@ -5347,9 +5121,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLE y _ (FlagGT_UGT)) @@ -5359,9 +5131,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLE y _ (FlagGT_ULT)) @@ -5371,9 +5141,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLE _ x (FlagLT_ULT)) @@ -5383,9 +5151,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLE _ x (FlagLT_UGT)) @@ -5395,9 +5161,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } return false @@ -5416,9 +5180,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLCC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLLS _ x (FlagEQ)) @@ -5428,9 +5190,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLS y _ (FlagGT_UGT)) @@ -5440,9 +5200,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLS _ x (FlagGT_ULT)) @@ -5452,9 +5210,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLS _ x (FlagLT_ULT)) @@ -5464,9 +5220,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLS y _ (FlagLT_UGT)) @@ -5476,9 +5230,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5497,9 +5249,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLLT y _ (FlagEQ)) @@ -5509,9 +5259,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLT y _ (FlagGT_UGT)) @@ -5521,9 +5269,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != 
OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLT y _ (FlagGT_ULT)) @@ -5533,9 +5279,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLT _ x (FlagLT_ULT)) @@ -5545,9 +5289,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLT _ x (FlagLT_UGT)) @@ -5557,9 +5299,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5578,9 +5318,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLNE y _ (FlagEQ)) @@ -5590,9 +5328,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLNE _ x (FlagGT_UGT)) @@ -5602,9 +5338,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLNE _ x (FlagGT_ULT)) @@ -5614,9 +5348,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLNE _ x (FlagLT_ULT)) @@ -5626,9 +5358,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLNE _ x (FlagLT_UGT)) @@ -5638,9 
+5368,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5659,9 +5387,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQLS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQCC _ x (FlagEQ)) @@ -5671,9 +5397,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCC _ x (FlagGT_UGT)) @@ -5683,9 +5407,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCC y _ (FlagGT_ULT)) @@ -5695,9 +5417,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCC y _ (FlagLT_ULT)) @@ -5707,9 +5427,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCC _ x (FlagLT_UGT)) @@ -5719,9 +5437,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5740,9 +5456,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQHI) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQCS y _ (FlagEQ)) @@ -5752,9 +5466,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return 
true } // match: (CMOVQCS y _ (FlagGT_UGT)) @@ -5764,9 +5476,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCS _ x (FlagGT_ULT)) @@ -5776,9 +5486,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCS _ x (FlagLT_ULT)) @@ -5788,9 +5496,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCS y _ (FlagLT_UGT)) @@ -5800,9 +5506,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5821,9 +5525,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQEQ _ x (FlagEQ)) @@ -5833,9 +5535,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQEQ y _ (FlagGT_UGT)) @@ -5845,9 +5545,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ y _ (FlagGT_ULT)) @@ -5857,9 +5555,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ y _ (FlagLT_ULT)) @@ -5869,9 +5565,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { 
break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ y _ (FlagLT_UGT)) @@ -5881,9 +5575,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) @@ -5906,9 +5598,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if !(c != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5927,9 +5617,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQGE _ x (FlagEQ)) @@ -5939,9 +5627,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGE _ x (FlagGT_UGT)) @@ -5951,9 +5637,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGE _ x (FlagGT_ULT)) @@ -5963,9 +5647,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGE y _ (FlagLT_ULT)) @@ -5975,9 +5657,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQGE y _ (FlagLT_UGT)) @@ -5987,9 +5667,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6008,9 +5686,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQGT y _ (FlagEQ)) @@ -6020,9 +5696,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQGT _ x (FlagGT_UGT)) @@ -6032,9 +5706,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGT _ x (FlagGT_ULT)) @@ -6044,9 +5716,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGT y _ (FlagLT_ULT)) @@ -6056,9 +5726,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQGT y _ (FlagLT_UGT)) @@ -6068,9 +5736,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6089,9 +5755,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQCS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQHI y _ (FlagEQ)) @@ -6101,9 +5765,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQHI _ x (FlagGT_UGT)) @@ -6113,9 +5775,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) 
return true } // match: (CMOVQHI y _ (FlagGT_ULT)) @@ -6125,9 +5785,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQHI y _ (FlagLT_ULT)) @@ -6137,9 +5795,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQHI _ x (FlagLT_UGT)) @@ -6149,9 +5805,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6170,9 +5824,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQLE _ x (FlagEQ)) @@ -6182,9 +5834,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLE y _ (FlagGT_UGT)) @@ -6194,9 +5844,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLE y _ (FlagGT_ULT)) @@ -6206,9 +5854,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLE _ x (FlagLT_ULT)) @@ -6218,9 +5864,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLE _ x (FlagLT_UGT)) @@ -6230,9 +5874,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != 
OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6251,9 +5893,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQCC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQLS _ x (FlagEQ)) @@ -6263,9 +5903,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLS y _ (FlagGT_UGT)) @@ -6275,9 +5913,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLS _ x (FlagGT_ULT)) @@ -6287,9 +5923,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLS _ x (FlagLT_ULT)) @@ -6299,9 +5933,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLS y _ (FlagLT_UGT)) @@ -6311,9 +5943,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6332,9 +5962,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQLT y _ (FlagEQ)) @@ -6344,9 +5972,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLT y _ (FlagGT_UGT)) @@ -6356,9 +5982,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLT y _ (FlagGT_ULT)) @@ -6368,9 +5992,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLT _ x (FlagLT_ULT)) @@ -6380,9 +6002,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLT _ x (FlagLT_UGT)) @@ -6392,9 +6012,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6413,9 +6031,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQNE y _ (FlagEQ)) @@ -6425,9 +6041,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQNE _ x (FlagGT_UGT)) @@ -6437,9 +6051,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQNE _ x (FlagGT_ULT)) @@ -6449,9 +6061,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQNE _ x (FlagLT_ULT)) @@ -6461,9 +6071,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) 
return true } // match: (CMOVQNE _ x (FlagLT_UGT)) @@ -6473,9 +6081,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6494,9 +6100,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWLS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWCC _ x (FlagEQ)) @@ -6506,9 +6110,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCC _ x (FlagGT_UGT)) @@ -6518,9 +6120,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCC y _ (FlagGT_ULT)) @@ -6530,9 +6130,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCC y _ (FlagLT_ULT)) @@ -6542,9 +6140,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCC _ x (FlagLT_UGT)) @@ -6554,9 +6150,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6575,9 +6169,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWHI) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWCS y _ (FlagEQ)) @@ -6587,9 +6179,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - 
v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCS y _ (FlagGT_UGT)) @@ -6599,9 +6189,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCS _ x (FlagGT_ULT)) @@ -6611,9 +6199,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCS _ x (FlagLT_ULT)) @@ -6623,9 +6209,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCS y _ (FlagLT_UGT)) @@ -6635,9 +6219,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6656,9 +6238,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWEQ _ x (FlagEQ)) @@ -6668,9 +6248,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWEQ y _ (FlagGT_UGT)) @@ -6680,9 +6258,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWEQ y _ (FlagGT_ULT)) @@ -6692,9 +6268,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWEQ y _ (FlagLT_ULT)) @@ -6704,9 +6278,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWEQ y _ (FlagLT_UGT)) @@ -6716,9 +6288,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6737,9 +6307,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWGE _ x (FlagEQ)) @@ -6749,9 +6317,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGE _ x (FlagGT_UGT)) @@ -6761,9 +6327,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGE _ x (FlagGT_ULT)) @@ -6773,9 +6337,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGE y _ (FlagLT_ULT)) @@ -6785,9 +6347,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWGE y _ (FlagLT_UGT)) @@ -6797,9 +6357,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6818,9 +6376,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // 
match: (CMOVWGT y _ (FlagEQ)) @@ -6830,9 +6386,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWGT _ x (FlagGT_UGT)) @@ -6842,9 +6396,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGT _ x (FlagGT_ULT)) @@ -6854,9 +6406,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGT y _ (FlagLT_ULT)) @@ -6866,9 +6416,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWGT y _ (FlagLT_UGT)) @@ -6878,9 +6426,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6899,9 +6445,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWCS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWHI y _ (FlagEQ)) @@ -6911,9 +6455,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWHI _ x (FlagGT_UGT)) @@ -6923,9 +6465,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWHI y _ (FlagGT_ULT)) @@ -6935,9 +6475,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - 
v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWHI y _ (FlagLT_ULT)) @@ -6947,9 +6485,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWHI _ x (FlagLT_UGT)) @@ -6959,9 +6495,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6980,9 +6514,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWLE _ x (FlagEQ)) @@ -6992,9 +6524,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLE y _ (FlagGT_UGT)) @@ -7004,9 +6534,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLE y _ (FlagGT_ULT)) @@ -7016,9 +6544,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLE _ x (FlagLT_ULT)) @@ -7028,9 +6554,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLE _ x (FlagLT_UGT)) @@ -7040,9 +6564,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -7061,9 +6583,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v 
*Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWCC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWLS _ x (FlagEQ)) @@ -7073,9 +6593,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLS y _ (FlagGT_UGT)) @@ -7085,9 +6603,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLS _ x (FlagGT_ULT)) @@ -7097,9 +6613,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLS _ x (FlagLT_ULT)) @@ -7109,9 +6623,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLS y _ (FlagLT_UGT)) @@ -7121,9 +6633,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -7142,9 +6652,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWLT y _ (FlagEQ)) @@ -7154,9 +6662,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLT y _ (FlagGT_UGT)) @@ -7166,9 +6672,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLT y 
_ (FlagGT_ULT)) @@ -7178,9 +6682,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLT _ x (FlagLT_ULT)) @@ -7190,9 +6692,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLT _ x (FlagLT_UGT)) @@ -7202,9 +6702,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -7223,9 +6721,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWNE y _ (FlagEQ)) @@ -7235,9 +6731,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWNE _ x (FlagGT_UGT)) @@ -7247,9 +6741,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWNE _ x (FlagGT_ULT)) @@ -7259,9 +6751,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWNE _ x (FlagLT_ULT)) @@ -7271,9 +6761,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWNE _ x (FlagLT_UGT)) @@ -7283,9 +6771,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - 
v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -7333,8 +6819,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7357,9 +6842,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { v.reset(OpAMD64CMPBload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) @@ -7382,9 +6865,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -7492,8 +6973,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPBconst (ANDLconst [c] x) [0]) @@ -7517,8 +6997,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTB) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) @@ -7539,12 +7018,10 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -7570,8 +7047,7 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { v.reset(OpAMD64CMPBconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -7593,8 +7069,7 @@ func 
rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { v.reset(OpAMD64CMPBconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -7622,9 +7097,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v.reset(OpAMD64CMPBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -7647,9 +7120,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v.reset(OpAMD64CMPBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) @@ -7670,8 +7141,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v.reset(OpAMD64CMPBconstload) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7719,8 +7189,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7743,9 +7212,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { v.reset(OpAMD64CMPLload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) @@ -7768,9 +7235,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -7893,8 +7358,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTL) - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPLconst (ANDLconst [c] x) [0]) @@ -7918,8 +7382,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTL) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) @@ -7940,12 +7403,10 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -7971,8 +7432,7 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { v.reset(OpAMD64CMPLconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -7994,8 +7454,7 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { v.reset(OpAMD64CMPLconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8023,9 +7482,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v.reset(OpAMD64CMPLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -8048,9 +7505,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v.reset(OpAMD64CMPLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) @@ -8071,8 +7526,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v.reset(OpAMD64CMPLconstload) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) 
- v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8128,11 +7582,100 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x==y + // result: (FlagEQ) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + break + } + y := v_1.AuxInt + if !(x == y) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: xuint64(y) + // result: (FlagLT_UGT) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + break + } + y := v_1.AuxInt + if !(x < y && uint64(x) > uint64(y)) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x>y && uint64(x) y && uint64(x) < uint64(y)) { + break + } + v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x>y && uint64(x)>uint64(y) + // result: (FlagGT_UGT) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + break + } + y := v_1.AuxInt + if !(x > y && uint64(x) > uint64(y)) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x) // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPQload {sym} [off] ptr x mem) @@ -8152,9 +7695,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { v.reset(OpAMD64CMPQload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem)) @@ -8177,9 +7718,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags) v0.AuxInt = 
off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -8384,8 +7923,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPQconst (ANDQconst [c] x) [0]) @@ -8409,8 +7947,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTQ) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c]) @@ -8431,12 +7968,10 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -8462,8 +7997,7 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool { v.reset(OpAMD64CMPQconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -8485,8 +8019,7 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool { v.reset(OpAMD64CMPQconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8514,9 +8047,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v.reset(OpAMD64CMPQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -8539,9 +8070,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v.reset(OpAMD64CMPQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - 
v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) @@ -8562,8 +8091,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v.reset(OpAMD64CMPQconstload) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8611,8 +8139,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -8635,9 +8162,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { v.reset(OpAMD64CMPWload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) @@ -8660,9 +8185,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -8770,8 +8293,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWconst (ANDLconst [c] x) [0]) @@ -8795,8 +8317,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTW) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) @@ -8817,12 +8338,10 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -8848,8 +8367,7 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool { 
v.reset(OpAMD64CMPWconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -8871,8 +8389,7 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool { v.reset(OpAMD64CMPWconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8900,9 +8417,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v.reset(OpAMD64CMPWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -8925,9 +8440,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v.reset(OpAMD64CMPWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) @@ -8948,8 +8461,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v.reset(OpAMD64CMPWconstload) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8979,10 +8491,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool { v.reset(OpAMD64CMPXCHGLlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } return false @@ -9012,10 +8521,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool { v.reset(OpAMD64CMPXCHGQlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } return false @@ -9042,9 +8548,7 @@ func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool { v.reset(OpAMD64DIVSDload) v.AuxInt = off 
v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -9072,9 +8576,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool { v.reset(OpAMD64DIVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -9097,9 +8599,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool { v.reset(OpAMD64DIVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -9126,9 +8626,7 @@ func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool { v.reset(OpAMD64DIVSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -9156,9 +8654,7 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool { v.reset(OpAMD64DIVSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -9181,9 +8677,7 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool { v.reset(OpAMD64DIVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -9201,8 +8695,7 @@ func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool { break } v.reset(OpAMD64HMULL) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9220,8 +8713,7 @@ func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool { break } v.reset(OpAMD64HMULLU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9239,8 +8731,7 @@ func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool { break } v.reset(OpAMD64HMULQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ 
-9258,8 +8749,7 @@ func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool { break } v.reset(OpAMD64HMULQU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9307,8 +8797,7 @@ func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9337,8 +8826,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9357,8 +8845,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9377,8 +8864,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9397,8 +8883,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9426,8 +8911,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) @@ -9448,8 +8932,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c + 2*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) @@ -9465,8 +8948,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) @@ -9482,8 +8964,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ 
-9509,8 +8990,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) @@ -9531,8 +9011,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c + 4*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) @@ -9548,8 +9027,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9575,8 +9053,7 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) @@ -9597,8 +9074,7 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c + 8*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9646,8 +9122,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9692,8 +9167,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) @@ -9715,8 +9189,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) @@ -9738,8 +9211,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return 
true } // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) @@ -9761,8 +9233,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9789,8 +9260,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9809,8 +9279,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9829,8 +9298,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9849,8 +9317,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9875,8 +9342,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9894,8 +9360,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { break } v.reset(OpAMD64ADDQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9921,8 +9386,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) @@ -9943,8 +9407,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c + 2*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) @@ -9960,8 +9423,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c 
v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) @@ -9977,8 +9439,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -10000,8 +9461,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10027,8 +9487,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) @@ -10049,8 +9508,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c + 4*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) @@ -10066,8 +9524,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -10089,8 +9546,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10116,8 +9572,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) @@ -10138,8 +9593,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c + 8*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -10161,8 +9615,7 @@ 
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10187,12 +9640,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) @@ -10212,12 +9663,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) @@ -10237,12 +9686,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) @@ -10262,12 +9709,10 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX (ANDLconst [c] x)) @@ -10315,9 +9760,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -10344,8 +9788,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { v.reset(OpAMD64MOVBQSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, 
sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -10370,12 +9813,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) @@ -10395,12 +9836,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) @@ -10420,12 +9859,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) @@ -10445,12 +9882,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x) @@ -10461,9 +9896,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if !(zeroUpper56Bits(x, 3)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) @@ -10484,13 +9917,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) 
return true } // match: (MOVBQZX (ANDLconst [c] x)) @@ -10540,8 +9970,7 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { v.reset(OpAMD64MOVBatomicload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) @@ -10563,8 +9992,7 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { v.reset(OpAMD64MOVBatomicload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10584,9 +10012,8 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -10612,8 +10039,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -10635,8 +10061,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -10659,9 +10084,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) @@ -10686,9 +10109,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10712,8 +10133,7 @@ func 
rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -10734,8 +10154,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} (SB) _) @@ -10777,9 +10196,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10804,9 +10221,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10830,8 +10245,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -10863,9 +10277,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) @@ -10887,9 +10299,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) @@ -10911,9 +10321,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETGE 
x) mem) @@ -10935,9 +10343,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) @@ -10959,9 +10365,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) @@ -10983,9 +10387,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) @@ -11007,9 +10409,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) @@ -11031,9 +10431,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) @@ -11055,9 +10453,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) @@ -11079,9 +10475,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) @@ -11098,9 +10492,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) 
bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) @@ -11117,9 +10509,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) @@ -11141,9 +10531,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -11164,8 +10552,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -11186,8 +10573,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -11210,9 +10596,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -11236,10 +10620,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore 
[off] {sym} (ADDQ ptr idx) val mem) @@ -11265,10 +10646,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11296,12 +10674,10 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) v0.AuxInt = 8 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) @@ -11351,11 +10727,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) @@ -11453,11 +10827,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 7 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -11482,9 +10854,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: 
(MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -11509,9 +10879,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -11536,9 +10904,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem)) @@ -11564,9 +10930,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem)) @@ -11592,9 +10956,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem)) @@ -11620,9 +10982,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) @@ -11652,9 +11012,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) @@ -11684,9 +11042,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) 
v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) @@ -11723,14 +11079,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = j - 1 v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(p2, mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -11753,9 +11106,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -11777,9 +11128,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -11805,8 +11154,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -11828,8 +11176,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -11852,9 +11199,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) 
v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -11871,9 +11216,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) @@ -11898,8 +11241,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) @@ -11924,8 +11266,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -11947,8 +11288,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -11969,8 +11309,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -11999,9 +11338,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ 
-12026,9 +11363,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12060,9 +11395,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } } @@ -12097,10 +11430,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12126,10 +11456,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12162,13 +11489,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) v0.AuxInt = 8 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, idx, v0, mem) return true } } @@ -12232,12 +11556,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 3 v.Aux = s - v.AddArg(p) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, idx, v0, mem) return true } } @@ -12363,12 +11684,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 7 v.Aux = s - v.AddArg(p) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + 
v.AddArg4(p, idx, v0, mem) return true } } @@ -12407,10 +11725,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12443,10 +11758,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12479,10 +11791,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12520,10 +11829,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -12561,10 +11867,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -12590,9 +11893,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -12616,9 +11917,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -12643,12 +11942,10 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off 
v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) @@ -12668,12 +11965,10 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQSX (ANDLconst [c] x)) @@ -12743,9 +12038,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -12772,8 +12066,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { v.reset(OpAMD64MOVLQSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -12798,12 +12091,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) @@ -12823,12 +12114,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQZX x) @@ -12839,9 +12128,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { if !(zeroUpper32Bits(x, 3)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) @@ -12862,13 +12149,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 
:= b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) @@ -12889,13 +12173,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVLQZX (ANDLconst [c] x)) @@ -12967,8 +12248,7 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { v.reset(OpAMD64MOVLatomicload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) @@ -12990,8 +12270,7 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { v.reset(OpAMD64MOVLatomicload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -13015,8 +12294,7 @@ func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -13042,8 +12320,7 @@ func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -13067,9 +12344,8 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -13095,8 +12371,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt 
= off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -13118,8 +12393,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -13142,9 +12416,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) @@ -13167,9 +12439,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) @@ -13192,9 +12462,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) @@ -13219,9 +12487,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13245,8 +12511,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -13267,8 +12532,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { 
v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) @@ -13280,11 +12544,10 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVLf2i) v.AddArg(val) return true @@ -13323,9 +12586,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13345,9 +12606,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13372,9 +12631,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13399,9 +12656,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13425,8 +12680,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -13456,9 +12710,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -13480,9 +12732,7 @@ func 
rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem) @@ -13503,8 +12753,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -13532,9 +12781,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -13556,9 +12803,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem) @@ -13579,8 +12824,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -13605,9 +12849,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) @@ -13624,9 +12866,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) @@ -13648,9 +12888,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -13671,8 +12909,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -13693,8 +12930,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -13717,9 +12953,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -13743,10 +12977,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) @@ -13770,10 +13001,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) @@ -13797,10 +13025,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, 
idx, val, mem) return true } // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) @@ -13826,10 +13051,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -13856,9 +13078,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) @@ -13888,9 +13108,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) @@ -13927,14 +13145,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = j - 4 v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(p2, mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -13957,9 +13172,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -13981,9 +13194,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: 
(MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) @@ -14005,9 +13216,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) @@ -14029,9 +13238,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) @@ -14053,9 +13260,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) @@ -14077,9 +13282,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14112,9 +13315,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14142,9 +13343,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64SUBLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14177,9 +13376,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) 
return true } break @@ -14214,9 +13411,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14251,9 +13446,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14281,9 +13474,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTCLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14309,9 +13500,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTRLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14337,9 +13526,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTSLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14366,8 +13553,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ADDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14394,8 +13580,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ANDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] 
l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14422,8 +13607,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14450,8 +13634,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64XORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14478,8 +13661,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTCLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14506,8 +13688,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTRLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14534,8 +13715,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTSLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) @@ -14552,9 +13732,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -14582,8 +13760,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s 
- v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -14605,8 +13782,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -14629,9 +13805,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) @@ -14654,9 +13828,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -14673,9 +13845,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) @@ -14700,11 +13870,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) @@ -14729,11 +13897,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = 
ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -14755,8 +13921,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -14777,8 +13942,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -14804,9 +13968,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14831,9 +13993,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14858,9 +14018,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14892,12 +14050,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, i, v0, mem) return 
true } } @@ -14930,9 +14085,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) @@ -14954,9 +14107,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) @@ -14982,15 +14133,12 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) v0.AuxInt = 2 v0.AddArg(i) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(p, v0, v1, mem) return true } return false @@ -15016,10 +14164,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15040,10 +14185,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15069,10 +14211,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15098,10 +14237,7 @@ func 
rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15133,10 +14269,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -15174,10 +14307,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -15203,9 +14333,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -15229,9 +14357,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -15262,10 +14388,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -15288,10 +14411,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) @@ -15317,13 +14437,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { 
v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 2 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) @@ -15354,13 +14471,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 2 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, v0, w0, mem) return true } // match: (MOVLstoreidx4 [i] {s} p (MOVQconst [c]) w mem) @@ -15382,9 +14496,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVLstoreidx4 [off] {s} ptr idx (MOVQconst [c]) mem) @@ -15406,9 +14518,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -15438,10 +14548,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -15464,10 +14571,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem) @@ -15489,9 +14593,7 @@ func 
rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -15517,8 +14619,7 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { v.reset(OpAMD64MOVOload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -15540,8 +14641,7 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { v.reset(OpAMD64MOVOload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -15572,9 +14672,7 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { v.reset(OpAMD64MOVOstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -15597,9 +14695,7 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { v.reset(OpAMD64MOVOstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) @@ -15614,7 +14710,6 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { } srcOff := v_1.AuxInt srcSym := v_1.Aux - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpSB { break @@ -15626,19 +14721,15 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = dstOff + 8 v.Aux = dstSym - v.AddArg(ptr) v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder)) - v.AddArg(v0) v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem) v1.AuxInt = dstOff v1.Aux = dstSym - v1.AddArg(ptr) v2 := b.NewValue0(v_1.Pos, 
OpAMD64MOVQconst, typ.UInt64) v2.AuxInt = int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder)) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } return false @@ -15664,8 +14755,7 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { v.reset(OpAMD64MOVQatomicload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) @@ -15687,8 +14777,7 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { v.reset(OpAMD64MOVQatomicload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -15712,8 +14801,7 @@ func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -15739,8 +14827,7 @@ func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -15764,15 +14851,12 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) @@ -15793,8 +14877,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -15816,8 +14899,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) 
v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -15840,9 +14922,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) @@ -15865,9 +14945,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) @@ -15892,9 +14970,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -15918,8 +14994,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -15940,8 +15015,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) @@ -15953,11 +15027,10 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVQf2i) v.AddArg(val) return true @@ -15996,9 +15069,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { 
v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16023,9 +15094,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16050,9 +15119,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16076,8 +15143,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -16107,9 +15173,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -16131,9 +15195,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem) @@ -16154,8 +15216,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -16183,9 +15244,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -16206,8 +15265,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { 
v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -16230,9 +15288,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -16256,10 +15312,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) @@ -16283,10 +15336,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) @@ -16312,10 +15362,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -16340,9 +15387,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -16364,9 +15409,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, 
mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) @@ -16388,9 +15431,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) @@ -16412,9 +15453,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) @@ -16436,9 +15475,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) @@ -16460,9 +15497,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16495,9 +15530,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16525,9 +15558,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64SUBQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16560,9 +15591,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - 
v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16597,9 +15626,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16634,9 +15661,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16664,9 +15689,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTCQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16692,9 +15715,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTRQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16720,9 +15741,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTSQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16749,8 +15768,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ADDQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16777,8 +15795,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ANDQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: 
(MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16805,8 +15822,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ORQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16833,8 +15849,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64XORQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16861,8 +15876,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTCQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16889,8 +15903,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTRQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16917,8 +15930,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTSQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) @@ -16935,9 +15947,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -16965,8 +15975,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) 
v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -16988,8 +15997,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -17012,9 +16020,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) @@ -17037,9 +16043,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -17056,9 +16060,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) @@ -17083,11 +16085,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVOstore) v.AuxInt = ValAndOff(c2).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -17109,8 +16109,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = 
ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -17131,8 +16130,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -17156,9 +16154,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17183,9 +16179,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17210,9 +16204,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17242,9 +16234,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) @@ -17266,9 +16256,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(8 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -17294,10 +16282,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) 
+ v.AddArg4(ptr, idx, val, mem) return true } break @@ -17323,10 +16308,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -17352,10 +16334,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -17380,9 +16359,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -17406,9 +16383,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = makeValAndOff(c, off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -17438,10 +16413,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -17464,10 +16436,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem) @@ -17489,9 +16458,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVQstoreidx8 [off] {s} ptr idx (MOVQconst [c]) mem) @@ -17513,9 
+16480,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = makeValAndOff(c, off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -17541,8 +16506,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -17564,8 +16528,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -17588,9 +16551,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) @@ -17613,9 +16574,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) @@ -17640,9 +16599,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17656,11 +16613,10 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVQi2f) v.AddArg(val) return true @@ 
-17685,9 +16641,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) @@ -17709,9 +16663,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -17733,9 +16685,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem) @@ -17756,8 +16706,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -17785,9 +16734,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -17809,9 +16756,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem) @@ -17832,8 +16777,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -17861,9 +16805,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { 
v.reset(OpAMD64MOVSDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -17886,9 +16828,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -17912,10 +16852,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) @@ -17939,10 +16876,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) @@ -17968,10 +16902,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -17990,9 +16921,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -18017,10 +16946,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: 
(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) @@ -18043,10 +16969,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18069,10 +16992,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem) @@ -18094,9 +17014,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18126,10 +17044,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18152,10 +17067,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem) @@ -18177,9 +17089,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18205,8 +17115,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -18228,8 +17137,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -18252,9 +17160,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) @@ -18277,9 +17183,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) @@ -18304,9 +17208,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -18320,11 +17222,10 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVLi2f) v.AddArg(val) return true @@ -18349,9 +17250,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) @@ -18373,9 +17272,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym 
- v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -18397,9 +17294,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem) @@ -18420,8 +17315,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -18449,9 +17343,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -18473,9 +17365,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem) @@ -18496,8 +17386,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -18525,9 +17414,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -18550,9 +17437,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, 
mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -18576,10 +17461,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) @@ -18603,10 +17485,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) @@ -18632,10 +17511,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -18654,9 +17530,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -18681,10 +17555,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) @@ -18707,10 +17578,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18733,10 +17601,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v 
*Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [i] {s} p (MOVQconst [c]) w mem) @@ -18758,9 +17623,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18790,10 +17653,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18816,10 +17676,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem) @@ -18841,9 +17698,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18868,12 +17723,10 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) @@ -18893,12 +17746,10 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return 
true } // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) @@ -18918,12 +17769,10 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQSX (ANDLconst [c] x)) @@ -18982,9 +17831,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -19011,8 +17859,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { v.reset(OpAMD64MOVWQSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -19037,12 +17884,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) @@ -19062,12 +17907,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) @@ -19087,12 +17930,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQZX x) @@ -19103,9 +17944,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { if 
!(zeroUpper48Bits(x, 3)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) @@ -19126,13 +17965,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) @@ -19153,13 +17989,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWQZX (ANDLconst [c] x)) @@ -19216,9 +18049,8 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -19244,8 +18076,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -19267,8 +18098,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -19291,9 +18121,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // 
match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) @@ -19316,9 +18144,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) @@ -19343,9 +18169,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19369,8 +18193,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -19391,8 +18214,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} (SB) _) @@ -19429,9 +18251,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19456,9 +18276,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19483,9 +18301,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19509,8 +18325,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - 
v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -19540,9 +18355,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -19564,9 +18377,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + 2*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem) @@ -19587,8 +18398,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = i + 2*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -19613,9 +18423,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) @@ -19632,9 +18440,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) @@ -19656,9 +18462,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -19679,8 +18483,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) @@ 
-19701,8 +18504,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -19725,9 +18527,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -19751,10 +18551,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) @@ -19778,10 +18575,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) @@ -19807,10 +18601,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -19837,9 +18628,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) @@ -19864,9 +18653,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - 
v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) @@ -19896,9 +18683,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) @@ -19928,9 +18713,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) @@ -19967,14 +18750,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = j - 2 v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(p2, mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -19997,9 +18777,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -20021,9 +18799,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -20049,8 +18825,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - 
v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -20072,8 +18847,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -20096,9 +18870,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) @@ -20121,9 +18893,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -20140,9 +18910,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) @@ -20167,8 +18935,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) @@ -20193,8 +18960,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 
v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -20216,8 +18982,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -20238,8 +19003,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -20263,9 +19027,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -20290,9 +19052,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -20317,9 +19077,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -20351,9 +19109,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } } @@ -20385,9 +19141,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + 
v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) @@ -20409,9 +19163,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) @@ -20437,12 +19189,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) v0.AuxInt = 1 v0.AddArg(i) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -20468,10 +19218,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -20497,10 +19244,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -20526,10 +19270,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -20561,10 +19302,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -20597,10 +19335,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) 
v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -20638,10 +19373,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -20679,10 +19411,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -20708,9 +19437,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -20734,9 +19461,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -20767,10 +19492,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -20793,10 +19515,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + 2*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) @@ -20822,13 +19541,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, 
OpAMD64SHLQconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) @@ -20854,13 +19570,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) @@ -20891,13 +19604,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, v0, w0, mem) return true } // match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem) @@ -20919,9 +19629,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i + 2*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstoreidx2 [off] {s} ptr idx (MOVLconst [c]) mem) @@ -20943,9 +19651,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -20997,8 +19703,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21011,8 +19716,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGL) v0 := 
b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21025,8 +19729,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21058,9 +19761,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLconst [ 3] x) @@ -21071,8 +19772,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [ 5] x) @@ -21083,8 +19783,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [ 7] x) @@ -21095,11 +19794,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [ 9] x) @@ -21110,8 +19807,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [11] x) @@ -21122,11 +19818,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [13] x) @@ -21137,11 +19831,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + 
v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [19] x) @@ -21152,11 +19844,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [21] x) @@ -21167,11 +19857,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [25] x) @@ -21182,11 +19870,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [27] x) @@ -21198,13 +19884,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [37] x) @@ -21215,11 +19898,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [41] x) @@ -21230,11 +19911,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [45] x) @@ 
-21246,13 +19925,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [73] x) @@ -21263,11 +19939,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [81] x) @@ -21279,13 +19953,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [c] x) @@ -21301,8 +19972,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21318,8 +19988,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21335,8 +20004,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 2) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21352,8 +20020,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, 
v.Type) v0.AuxInt = log2(c - 4) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21369,8 +20036,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 8) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21385,8 +20051,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v.reset(OpAMD64SHLLconst) v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21402,8 +20067,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v.reset(OpAMD64SHLLconst) v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21419,8 +20083,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v.reset(OpAMD64SHLLconst) v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21493,8 +20156,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21507,8 +20169,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21521,8 +20182,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21554,9 +20214,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } // match: (MULQconst [ 3] x) @@ -21567,8 +20225,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULQconst [ 5] x) @@ -21579,8 +20236,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULQconst [ 7] x) @@ -21591,11 +20247,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [ 9] x) @@ -21606,8 +20260,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULQconst [11] x) @@ -21618,11 +20271,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [13] x) @@ -21633,11 +20284,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [19] x) @@ -21648,11 +20297,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [21] x) @@ -21663,11 +20310,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 
v.reset(OpAMD64LEAQ4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [25] x) @@ -21678,11 +20323,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [27] x) @@ -21694,13 +20337,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULQconst [37] x) @@ -21711,11 +20351,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [41] x) @@ -21726,11 +20364,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [45] x) @@ -21742,13 +20378,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULQconst [73] x) @@ -21759,11 +20392,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x 
:= v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [81] x) @@ -21775,13 +20406,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULQconst [c] x) @@ -21797,8 +20425,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21814,8 +20441,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21831,8 +20457,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 2) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21848,8 +20473,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 4) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21865,8 +20489,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 8) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21881,8 +20504,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { 
v.reset(OpAMD64SHLQconst) v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21898,8 +20520,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v.reset(OpAMD64SHLQconst) v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21915,8 +20536,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v.reset(OpAMD64SHLQconst) v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21974,9 +20594,7 @@ func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool { v.reset(OpAMD64MULSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -22008,9 +20626,7 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { v.reset(OpAMD64MULSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -22033,9 +20649,7 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { v.reset(OpAMD64MULSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) @@ -22048,16 +20662,14 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64MULSD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -22085,9 +20697,7 @@ func 
rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { v.reset(OpAMD64MULSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -22119,9 +20729,7 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { v.reset(OpAMD64MULSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -22144,9 +20752,7 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { v.reset(OpAMD64MULSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) @@ -22159,16 +20765,14 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64MULSS) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -22182,9 +20786,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (NEGL s:(SUBL x y)) @@ -22201,8 +20803,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { break } v.reset(OpAMD64SUBL) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (NEGL (MOVLconst [c])) @@ -22227,9 +20828,7 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (NEGQ s:(SUBQ x y)) @@ -22246,8 +20845,7 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { break } v.reset(OpAMD64SUBQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) 
return true } // match: (NEGQ (MOVQconst [c])) @@ -22333,8 +20931,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } x := v_1 v.reset(OpAMD64BTSL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22493,8 +21090,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22544,8 +21140,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22595,8 +21190,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22646,8 +21240,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22711,8 +21304,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22776,8 +21368,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22818,8 +21409,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22859,8 +21449,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22923,8 +21512,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22988,8 +21576,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -23030,8 +21617,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { 
continue } v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -23071,8 +21657,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -23084,9 +21669,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) @@ -23120,12 +21703,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -23161,12 +21742,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -23220,18 +21799,15 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23275,13 +21851,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -23326,13 +21899,10 @@ func 
rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -23395,19 +21965,15 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23446,14 +22012,12 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -23498,13 +22062,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -23559,8 +22121,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -23568,12 +22129,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, 
typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23617,15 +22176,12 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -23679,14 +22235,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -23750,8 +22303,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -23759,13 +22311,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23793,9 +22342,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v.reset(OpAMD64ORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -23855,9 +22402,7 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } // match: (ORLconst [c] _) @@ -23907,8 +22452,7 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { v.reset(OpAMD64ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -23930,8 +22474,7 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { v.reset(OpAMD64ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -23961,9 +22504,7 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { v.reset(OpAMD64ORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -23986,9 +22527,7 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { v.reset(OpAMD64ORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -24001,16 +22540,14 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ORL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -24038,9 +22575,7 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -24063,9 +22598,7 
@@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -24089,8 +22622,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } x := v_1 v.reset(OpAMD64BTSQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -24135,6 +22667,22 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } break } + // match: (ORQ x (MOVLconst [c])) + // result: (ORQconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + break + } // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) // cond: d==64-c // result: (ROLQconst x [c]) @@ -24203,8 +22751,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24254,8 +22801,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24305,8 +22851,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24356,13 +22901,30 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } break } + // match: (ORQ (MOVQconst [c]) (MOVQconst [d])) + // result: (MOVQconst [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + continue + } + d := v_1.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = c | d + return true + } + break + } // match: (ORQ x x) // result: x for { @@ -24370,9 
+22932,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) @@ -24406,12 +22966,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -24447,12 +23005,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -24488,12 +23044,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -24547,18 +23101,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -24613,18 +23164,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v2.AuxInt = i0 
v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -24668,13 +23216,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -24719,13 +23264,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -24770,13 +23312,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -24839,19 +23378,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -24915,19 +23450,15 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - 
v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -24966,14 +23497,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -25018,13 +23547,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -25069,13 +23596,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -25130,8 +23655,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -25139,12 +23663,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25207,20 +23729,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := 
b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25264,15 +23783,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -25326,14 +23842,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -25387,14 +23900,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -25458,8 +23968,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -25467,13 +23976,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v 
*Value) bool { v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25545,21 +24051,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25587,9 +24089,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.reset(OpAMD64ORQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -25647,9 +24147,7 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORQconst [-1] _) @@ -25697,8 +24195,7 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { v.reset(OpAMD64ORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -25720,8 +24217,7 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { v.reset(OpAMD64ORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -25751,9 +24247,7 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { v.reset(OpAMD64ORQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - 
v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -25776,9 +24270,7 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { v.reset(OpAMD64ORQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -25791,16 +24283,14 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ORQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -25828,9 +24318,7 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -25853,9 +24341,7 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -25872,8 +24358,7 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLB x (NEGL y)) @@ -25885,8 +24370,7 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLB x (MOVQconst [c])) @@ -25940,9 +24424,7 @@ func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) 
+ v.copyOf(x) return true } return false @@ -25959,8 +24441,7 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLL x (NEGL y)) @@ -25972,8 +24453,7 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLL x (MOVQconst [c])) @@ -26027,9 +24507,7 @@ func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -26046,8 +24524,7 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLQ x (NEGL y)) @@ -26059,8 +24536,7 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLQ x (MOVQconst [c])) @@ -26114,9 +24590,7 @@ func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -26133,8 +24607,7 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLW x (NEGL y)) @@ -26146,8 +24619,7 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLW x (MOVQconst [c])) @@ -26201,9 +24673,7 @@ func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -26220,8 +24690,7 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } // match: (RORB x (NEGL y)) @@ -26233,8 +24702,7 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORB x (MOVQconst [c])) @@ -26277,8 +24745,7 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORL x (NEGL y)) @@ -26290,8 +24757,7 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORL x (MOVQconst [c])) @@ -26334,8 +24800,7 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORQ x (NEGL y)) @@ -26347,8 +24812,7 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORQ x (MOVQconst [c])) @@ -26391,8 +24855,7 @@ func rewriteValueAMD64_OpAMD64RORW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORW x (NEGL y)) @@ -26404,8 +24867,7 @@ func rewriteValueAMD64_OpAMD64RORW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORW x (MOVQconst [c])) @@ -26476,9 +24938,7 @@ func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARBconst [c] (MOVQconst [d])) @@ -26539,8 +24999,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGQ (ADDQconst [c] y))) @@ -26562,10 +25021,9 @@ func 
rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARL x (ANDQconst [c] y)) @@ -26582,8 +25040,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGQ (ANDQconst [c] y))) @@ -26605,10 +25062,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARL x (ADDLconst [c] y)) @@ -26625,8 +25081,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGL (ADDLconst [c] y))) @@ -26648,10 +25103,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARL x (ANDLconst [c] y)) @@ -26668,8 +25122,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGL (ANDLconst [c] y))) @@ -26691,10 +25144,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -26708,9 +25160,7 @@ func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARLconst [c] (MOVQconst [d])) @@ -26771,8 +25221,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // 
match: (SARQ x (NEGQ (ADDQconst [c] y))) @@ -26794,10 +25243,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARQ x (ANDQconst [c] y)) @@ -26814,8 +25262,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGQ (ANDQconst [c] y))) @@ -26837,10 +25284,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARQ x (ADDLconst [c] y)) @@ -26857,8 +25303,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGL (ADDLconst [c] y))) @@ -26880,10 +25325,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARQ x (ANDLconst [c] y)) @@ -26900,8 +25344,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGL (ANDLconst [c] y))) @@ -26923,10 +25366,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -26940,9 +25382,7 @@ func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARQconst [c] (MOVQconst [d])) @@ -26999,9 +25439,7 @@ func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool { break } x := v_0 - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARWconst [c] (MOVQconst [d])) @@ -27091,8 +25529,7 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool { } v.reset(OpAMD64SBBQconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(borrow) + v.AddArg2(x, borrow) return true } // match: (SBBQ x y (FlagEQ)) @@ -27104,8 +25541,7 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool { break } v.reset(OpAMD64SUBQborrow) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -27388,9 +25824,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -27412,9 +25846,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -27437,9 +25869,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem) @@ -27455,11 +25885,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -27475,11 +25903,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) 
- v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -27495,11 +25921,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -27515,11 +25939,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -27535,11 +25957,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -27564,9 +25984,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -27588,9 +26006,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -27613,9 +26029,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagEQ) mem) 
@@ -27631,11 +26045,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -27651,11 +26063,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -27671,11 +26081,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -27691,11 +26099,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -27711,11 +26117,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -27926,9 +26330,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -27950,9 +26352,7 @@ 
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -27975,9 +26375,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem) @@ -27993,11 +26391,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -28013,11 +26409,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -28033,11 +26427,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -28053,11 +26445,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -28073,11 +26463,9 @@ 
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -28102,9 +26490,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -28126,9 +26512,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -28151,9 +26535,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagEQ) mem) @@ -28169,11 +26551,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -28189,11 +26569,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -28209,11 +26587,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - 
v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -28229,11 +26605,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -28249,11 +26623,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -28282,8 +26654,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -28310,8 +26681,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -28702,12 +27072,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -28738,12 +27105,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } 
break @@ -28767,12 +27131,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem) @@ -28794,12 +27156,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) @@ -28828,12 +27188,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -28855,12 +27213,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) @@ -28880,12 +27236,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) @@ -28919,12 +27273,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { 
v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -28960,12 +27312,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29001,12 +27351,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29042,12 +27390,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29079,12 +27425,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29116,12 +27460,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29140,9 +27482,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - 
v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29164,9 +27504,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29189,9 +27527,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem) @@ -29207,11 +27543,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -29227,11 +27561,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -29247,11 +27579,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -29267,11 +27597,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - 
v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29287,11 +27615,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -29446,9 +27772,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29470,9 +27794,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29495,9 +27817,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) @@ -29513,11 +27833,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -29533,11 +27851,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} 
ptr (FlagLT_UGT) mem) @@ -29553,11 +27869,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -29573,11 +27887,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29593,11 +27905,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -29622,9 +27932,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29646,9 +27954,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29671,9 +27977,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) @@ -29689,11 +27993,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { 
v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -29709,11 +28011,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -29729,11 +28029,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -29749,11 +28047,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29769,11 +28065,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -29928,9 +28222,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29952,9 +28244,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64SETLEstore) 
v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29977,9 +28267,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) @@ -29995,11 +28283,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -30015,11 +28301,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -30035,11 +28319,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -30055,11 +28337,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -30075,11 +28355,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) 
v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -30104,9 +28382,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -30128,9 +28404,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -30153,9 +28427,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) @@ -30171,11 +28443,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -30191,11 +28461,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -30211,11 +28479,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - 
v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -30231,11 +28497,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -30251,11 +28515,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -30284,8 +28546,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -30312,8 +28573,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -30704,12 +28964,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -30740,12 +28997,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -30769,12 +29023,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { 
v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) @@ -30796,12 +29048,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) @@ -30830,12 +29080,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -30857,12 +29105,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) @@ -30882,12 +29128,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) @@ -30921,12 +29165,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, 
types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -30962,12 +29204,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31003,12 +29243,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31044,12 +29282,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31081,12 +29317,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31118,12 +29352,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31142,9 +29374,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ 
-31166,9 +29396,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -31191,9 +29419,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) @@ -31209,11 +29435,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -31229,11 +29453,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -31249,11 +29471,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -31269,11 +29489,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ 
-31289,11 +29507,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -31342,8 +29558,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGQ (ADDQconst [c] y))) @@ -31365,10 +29580,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLL x (ANDQconst [c] y)) @@ -31385,8 +29599,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGQ (ANDQconst [c] y))) @@ -31408,10 +29621,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLL x (ADDLconst [c] y)) @@ -31428,8 +29640,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGL (ADDLconst [c] y))) @@ -31451,10 +29662,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLL x (ANDLconst [c] y)) @@ -31471,8 +29681,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGL (ANDLconst [c] y))) @@ -31494,10 +29703,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } 
v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -31523,9 +29731,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SHLLconst [d] (MOVLconst [c])) @@ -31586,8 +29792,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGQ (ADDQconst [c] y))) @@ -31609,10 +29814,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLQ x (ANDQconst [c] y)) @@ -31629,8 +29833,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGQ (ANDQconst [c] y))) @@ -31652,10 +29855,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLQ x (ADDLconst [c] y)) @@ -31672,8 +29874,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGL (ADDLconst [c] y))) @@ -31695,10 +29896,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLQ x (ANDLconst [c] y)) @@ -31715,8 +29915,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGL (ANDLconst [c] y))) @@ -31738,10 
+29937,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -31767,9 +29965,7 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SHLQconst [d] (MOVQconst [c])) @@ -31784,6 +29980,18 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { v.AuxInt = c << uint64(d) return true } + // match: (SHLQconst [d] (MOVLconst [c])) + // result: (MOVQconst [int64(int32(c)) << uint64(d)]) + for { + d := v.AuxInt + if v_0.Op != OpAMD64MOVLconst { + break + } + c := v_0.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64(int32(c)) << uint64(d) + return true + } return false } func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool { @@ -31864,9 +30072,7 @@ func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -31915,8 +30121,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGQ (ADDQconst [c] y))) @@ -31938,10 +30143,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRL x (ANDQconst [c] y)) @@ -31958,8 +30162,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGQ (ANDQconst [c] y))) @@ -31981,10 +30184,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // 
match: (SHRL x (ADDLconst [c] y)) @@ -32001,8 +30203,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGL (ADDLconst [c] y))) @@ -32024,10 +30225,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRL x (ANDLconst [c] y)) @@ -32044,8 +30244,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGL (ANDLconst [c] y))) @@ -32067,10 +30266,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32096,9 +30294,7 @@ func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -32147,8 +30343,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGQ (ADDQconst [c] y))) @@ -32170,10 +30365,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRQ x (ANDQconst [c] y)) @@ -32190,8 +30384,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGQ (ANDQconst [c] y))) @@ -32213,10 +30406,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - 
v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRQ x (ADDLconst [c] y)) @@ -32233,8 +30425,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGL (ADDLconst [c] y))) @@ -32256,10 +30447,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRQ x (ANDLconst [c] y)) @@ -32276,8 +30466,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGL (ANDLconst [c] y))) @@ -32299,10 +30488,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32328,9 +30516,7 @@ func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -32413,9 +30599,7 @@ func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -32482,9 +30666,7 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { v.reset(OpAMD64SUBLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -32500,9 +30682,7 @@ func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBLconst [c] x) @@ -32541,9 +30721,7 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { v.reset(OpAMD64SUBLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - 
v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -32566,9 +30744,7 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { v.reset(OpAMD64SUBLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -32581,16 +30757,14 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32618,9 +30792,7 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool { v.reset(OpAMD64SUBLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -32643,9 +30815,7 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool { v.reset(OpAMD64SUBLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -32720,9 +30890,7 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool { v.reset(OpAMD64SUBQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -32758,9 +30926,7 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBQconst [c] x) @@ -32834,9 +31000,7 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { v.reset(OpAMD64SUBQload) v.AuxInt = off1 + off2 v.Aux = sym 
- v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -32859,9 +31023,7 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { v.reset(OpAMD64SUBQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -32874,16 +31036,14 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32911,9 +31071,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool { v.reset(OpAMD64SUBQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -32936,9 +31094,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool { v.reset(OpAMD64SUBQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -32965,9 +31121,7 @@ func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool { v.reset(OpAMD64SUBSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -32997,9 +31151,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { v.reset(OpAMD64SUBSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ 
-33022,9 +31174,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { v.reset(OpAMD64SUBSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) @@ -33037,16 +31187,14 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBSD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -33073,9 +31221,7 @@ func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool { v.reset(OpAMD64SUBSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -33105,9 +31251,7 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { v.reset(OpAMD64SUBSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -33130,9 +31274,7 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { v.reset(OpAMD64SUBSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) @@ -33145,16 +31287,14 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBSS) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return 
false @@ -33198,12 +31338,10 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33224,8 +31362,7 @@ func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool { break } v.reset(OpAMD64TESTB) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33269,12 +31406,10 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33305,8 +31440,7 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { break } v.reset(OpAMD64TESTL) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33354,12 +31488,10 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33390,8 +31522,7 @@ func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool { break } v.reset(OpAMD64TESTQ) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33435,12 +31566,10 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33461,8 +31590,7 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { break } v.reset(OpAMD64TESTW) - 
v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33490,9 +31618,7 @@ func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v.reset(OpAMD64XADDLlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33520,9 +31646,7 @@ func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { v.reset(OpAMD64XADDQlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33550,9 +31674,7 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { v.reset(OpAMD64XCHGL) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) @@ -33575,9 +31697,7 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { v.reset(OpAMD64XCHGL) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33605,9 +31725,7 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { v.reset(OpAMD64XCHGQ) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) @@ -33630,9 +31748,7 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { v.reset(OpAMD64XCHGQ) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33654,8 +31770,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { } x := v_1 v.reset(OpAMD64BTCL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -33801,9 +31916,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { v.reset(OpAMD64XORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg3(x, ptr, mem) return true } break @@ -33973,9 +32086,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORLconst [c] (MOVLconst [d])) @@ -34013,8 +32124,7 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { v.reset(OpAMD64XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -34036,8 +32146,7 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { v.reset(OpAMD64XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -34067,9 +32176,7 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { v.reset(OpAMD64XORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -34092,9 +32199,7 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { v.reset(OpAMD64XORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -34107,16 +32212,14 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64XORL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -34144,9 +32247,7 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool { 
v.reset(OpAMD64XORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -34169,9 +32270,7 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -34193,8 +32292,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } x := v_1 v.reset(OpAMD64BTCQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -34294,9 +32392,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { v.reset(OpAMD64XORQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -34354,9 +32450,7 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORQconst [c] (MOVQconst [d])) @@ -34394,8 +32488,7 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool { v.reset(OpAMD64XORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -34417,8 +32510,7 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool { v.reset(OpAMD64XORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -34448,9 +32540,7 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { v.reset(OpAMD64XORQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -34473,9 
+32563,7 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { v.reset(OpAMD64XORQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -34488,16 +32576,14 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64XORQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -34525,9 +32611,7 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -34550,9 +32634,7 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -34570,12 +32652,9 @@ func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64AddTupleFirst32) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } @@ -34592,12 +32671,9 @@ func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64AddTupleFirst64) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(val, ptr, mem) + 
v.AddArg2(val, v0) return true } } @@ -34612,9 +32688,7 @@ func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64XCHGL) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } } @@ -34629,9 +32703,7 @@ func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64XCHGQ) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } } @@ -34649,9 +32721,7 @@ func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34670,9 +32740,7 @@ func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34691,9 +32759,7 @@ func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34712,9 +32778,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34732,10 +32796,9 @@ func rewriteValueAMD64_OpBitLen16(v *Value) bool { v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) v2.AddArg(x) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -34754,10 +32817,9 @@ func 
rewriteValueAMD64_OpBitLen32(v *Value) bool { v1.AuxInt = 1 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) v3.AddArg(x) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) v.AddArg(v0) return true @@ -34779,15 +32841,13 @@ func rewriteValueAMD64_OpBitLen64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AddArg(x) v1.AddArg(v2) - v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) v3.AuxInt = -1 - v0.AddArg(v3) v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v5.AddArg(x) v4.AddArg(v5) - v0.AddArg(v4) + v0.AddArg3(v1, v3, v4) v.AddArg(v0) return true } @@ -34805,10 +32865,9 @@ func rewriteValueAMD64_OpBitLen8(v *Value) bool { v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) v2.AddArg(x) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -34846,9 +32905,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQEQ) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNE cond)) @@ -34866,9 +32923,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQNE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETL cond)) @@ -34886,9 +32941,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQLT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETG cond)) @@ -34906,9 +32959,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: 
(CondSelect x y (SETLE cond)) @@ -34926,9 +32977,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQLE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGE cond)) @@ -34946,9 +32995,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETA cond)) @@ -34966,9 +33013,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQHI) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETB cond)) @@ -34986,9 +33031,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQCS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETAE cond)) @@ -35006,9 +33049,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQCC) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETBE cond)) @@ -35026,9 +33067,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQLS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQF cond)) @@ -35046,9 +33085,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQEQF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNEF cond)) @@ -35066,9 +33103,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQNEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGF cond)) @@ -35086,9 +33121,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGTF) - 
v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGEF cond)) @@ -35106,9 +33139,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQ cond)) @@ -35126,9 +33157,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLEQ) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNE cond)) @@ -35146,9 +33175,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLNE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETL cond)) @@ -35166,9 +33193,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLLT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETG cond)) @@ -35186,9 +33211,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETLE cond)) @@ -35206,9 +33229,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLLE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGE cond)) @@ -35226,9 +33247,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETA cond)) @@ -35246,9 +33265,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLHI) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETB cond)) @@ -35266,9 +33283,7 @@ 
func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLCS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETAE cond)) @@ -35286,9 +33301,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLCC) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETBE cond)) @@ -35306,9 +33319,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLLS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQF cond)) @@ -35326,9 +33337,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLEQF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNEF cond)) @@ -35346,9 +33355,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLNEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGF cond)) @@ -35366,9 +33373,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGTF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGEF cond)) @@ -35386,9 +33391,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQ cond)) @@ -35406,9 +33409,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWEQ) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNE cond)) @@ -35426,9 +33427,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWNE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + 
v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETL cond)) @@ -35446,9 +33445,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWLT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETG cond)) @@ -35466,9 +33463,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETLE cond)) @@ -35486,9 +33481,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWLE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGE cond)) @@ -35506,9 +33499,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETA cond)) @@ -35526,9 +33517,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWHI) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETB cond)) @@ -35546,9 +33535,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWCS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETAE cond)) @@ -35566,9 +33553,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWCC) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETBE cond)) @@ -35586,9 +33571,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWLS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQF cond)) @@ -35606,9 +33589,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) 
bool { break } v.reset(OpAMD64CMOVWEQF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNEF cond)) @@ -35626,9 +33607,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWNEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGF cond)) @@ -35646,9 +33625,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGTF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGEF cond)) @@ -35666,9 +33643,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y check) @@ -35684,11 +33659,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { } v.reset(OpCondSelect) v.Type = t - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } // match: (CondSelect x y check) @@ -35704,11 +33677,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { } v.reset(OpCondSelect) v.Type = t - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } // match: (CondSelect x y check) @@ -35724,11 +33695,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { } v.reset(OpCondSelect) v.Type = t - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } // match: (CondSelect x y check) @@ -35743,12 +33712,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQNE) - v.AddArg(y) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(check) - v.AddArg(v0) + 
v.AddArg3(y, x, v0) return true } // match: (CondSelect x y check) @@ -35763,12 +33730,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLNE) - v.AddArg(y) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(y, x, v0) return true } // match: (CondSelect x y check) @@ -35783,12 +33748,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWNE) - v.AddArg(y) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(y, x, v0) return true } return false @@ -35841,15 +33804,13 @@ func rewriteValueAMD64_OpCtz64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) v2.AuxInt = 64 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v4.AddArg(x) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -35898,8 +33859,7 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool { v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -35916,8 +33876,7 @@ func rewriteValueAMD64_OpDiv16u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -35936,8 +33895,7 @@ func rewriteValueAMD64_OpDiv32(v *Value) bool { v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -35954,8 +33912,7 @@ func rewriteValueAMD64_OpDiv32u(v 
*Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -35974,8 +33931,7 @@ func rewriteValueAMD64_OpDiv64(v *Value) bool { v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -35992,8 +33948,7 @@ func rewriteValueAMD64_OpDiv64u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36012,10 +33967,9 @@ func rewriteValueAMD64_OpDiv8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -36034,10 +33988,9 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -36053,8 +34006,7 @@ func rewriteValueAMD64_OpEq16(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36070,8 +34022,7 @@ func rewriteValueAMD64_OpEq32(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36087,8 +34038,7 @@ func rewriteValueAMD64_OpEq32F(v *Value) bool { y := v_1 
v.reset(OpAMD64SETEQF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36104,8 +34054,7 @@ func rewriteValueAMD64_OpEq64(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36121,8 +34070,7 @@ func rewriteValueAMD64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36138,8 +34086,7 @@ func rewriteValueAMD64_OpEq8(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36155,8 +34102,7 @@ func rewriteValueAMD64_OpEqB(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36172,8 +34118,7 @@ func rewriteValueAMD64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36189,9 +34134,7 @@ func rewriteValueAMD64_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpAMD64VFMADD231SD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -36218,8 +34161,7 @@ func rewriteValueAMD64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36235,8 +34177,7 @@ func rewriteValueAMD64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } 
@@ -36252,8 +34193,7 @@ func rewriteValueAMD64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36269,8 +34209,7 @@ func rewriteValueAMD64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36286,8 +34225,7 @@ func rewriteValueAMD64_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -36301,8 +34239,7 @@ func rewriteValueAMD64_OpIsNonNil(v *Value) bool { p := v_0 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg(p) - v0.AddArg(p) + v0.AddArg2(p, p) v.AddArg(v0) return true } @@ -36318,8 +34255,7 @@ func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -36335,8 +34271,7 @@ func rewriteValueAMD64_OpLeq16(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36352,8 +34287,7 @@ func rewriteValueAMD64_OpLeq16U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36369,8 +34303,7 @@ func rewriteValueAMD64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36386,8 +34319,7 @@ func rewriteValueAMD64_OpLeq32F(v *Value) bool { 
y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36403,8 +34335,7 @@ func rewriteValueAMD64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36420,8 +34351,7 @@ func rewriteValueAMD64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36437,8 +34367,7 @@ func rewriteValueAMD64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36454,8 +34383,7 @@ func rewriteValueAMD64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36471,8 +34399,7 @@ func rewriteValueAMD64_OpLeq8(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36488,8 +34415,7 @@ func rewriteValueAMD64_OpLeq8U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36505,8 +34431,7 @@ func rewriteValueAMD64_OpLess16(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36522,8 +34447,7 @@ func rewriteValueAMD64_OpLess16U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) 
+ v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36539,8 +34463,7 @@ func rewriteValueAMD64_OpLess32(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36556,8 +34479,7 @@ func rewriteValueAMD64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36573,8 +34495,7 @@ func rewriteValueAMD64_OpLess32U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36590,8 +34511,7 @@ func rewriteValueAMD64_OpLess64(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36607,8 +34527,7 @@ func rewriteValueAMD64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36624,8 +34543,7 @@ func rewriteValueAMD64_OpLess64U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36641,8 +34559,7 @@ func rewriteValueAMD64_OpLess8(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36658,8 +34575,7 @@ func rewriteValueAMD64_OpLess8U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36678,8 +34594,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { 
break } v.reset(OpAMD64MOVQload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36693,8 +34608,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVLload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36708,8 +34622,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36723,8 +34636,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36738,8 +34650,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVSSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36753,8 +34664,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVSDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -36788,15 +34698,13 @@ func rewriteValueAMD64_OpLsh16x16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x16 x y) @@ -36809,8 +34717,7 @@ func rewriteValueAMD64_OpLsh16x16(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -36831,15 +34738,13 @@ func rewriteValueAMD64_OpLsh16x32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, 
OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x32 x y) @@ -36852,8 +34757,7 @@ func rewriteValueAMD64_OpLsh16x32(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -36874,15 +34778,13 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x64 x y) @@ -36895,8 +34797,7 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -36917,15 +34818,13 @@ func rewriteValueAMD64_OpLsh16x8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x8 x y) @@ -36938,8 +34837,7 @@ func rewriteValueAMD64_OpLsh16x8(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -36960,15 +34858,13 @@ func rewriteValueAMD64_OpLsh32x16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x16 x y) @@ -36981,8 +34877,7 @@ 
func rewriteValueAMD64_OpLsh32x16(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37003,15 +34898,13 @@ func rewriteValueAMD64_OpLsh32x32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x32 x y) @@ -37024,8 +34917,7 @@ func rewriteValueAMD64_OpLsh32x32(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37046,15 +34938,13 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x64 x y) @@ -37067,8 +34957,7 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37089,15 +34978,13 @@ func rewriteValueAMD64_OpLsh32x8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x8 x y) @@ -37110,8 +34997,7 @@ func rewriteValueAMD64_OpLsh32x8(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37132,15 +35018,13 @@ 
func rewriteValueAMD64_OpLsh64x16(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x16 x y) @@ -37153,8 +35037,7 @@ func rewriteValueAMD64_OpLsh64x16(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37175,15 +35058,13 @@ func rewriteValueAMD64_OpLsh64x32(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x32 x y) @@ -37196,8 +35077,7 @@ func rewriteValueAMD64_OpLsh64x32(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37218,15 +35098,13 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x64 x y) @@ -37239,8 +35117,7 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37261,15 +35138,13 @@ func rewriteValueAMD64_OpLsh64x8(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, 
y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x8 x y) @@ -37282,8 +35157,7 @@ func rewriteValueAMD64_OpLsh64x8(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37304,15 +35178,13 @@ func rewriteValueAMD64_OpLsh8x16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x16 x y) @@ -37325,8 +35197,7 @@ func rewriteValueAMD64_OpLsh8x16(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37347,15 +35218,13 @@ func rewriteValueAMD64_OpLsh8x32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x32 x y) @@ -37368,8 +35237,7 @@ func rewriteValueAMD64_OpLsh8x32(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37390,15 +35258,13 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + 
v.AddArg2(v0, v1) return true } // match: (Lsh8x64 x y) @@ -37411,8 +35277,7 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37433,15 +35298,13 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x8 x y) @@ -37454,8 +35317,7 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37474,8 +35336,7 @@ func rewriteValueAMD64_OpMod16(v *Value) bool { v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37492,8 +35353,7 @@ func rewriteValueAMD64_OpMod16u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37512,8 +35372,7 @@ func rewriteValueAMD64_OpMod32(v *Value) bool { v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37530,8 +35389,7 @@ func rewriteValueAMD64_OpMod32u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37550,8 +35408,7 @@ func rewriteValueAMD64_OpMod64(v *Value) bool { v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, 
types.NewTuple(typ.Int64, typ.Int64)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37568,8 +35425,7 @@ func rewriteValueAMD64_OpMod64u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37588,10 +35444,9 @@ func rewriteValueAMD64_OpMod8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -37610,10 +35465,9 @@ func rewriteValueAMD64_OpMod8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -37632,9 +35486,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -37647,12 +35499,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -37665,12 +35514,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move 
[4] dst src mem) @@ -37683,12 +35529,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVLstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -37701,12 +35544,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVQstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -37723,12 +35563,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { break } v.reset(OpAMD64MOVOstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -37746,20 +35583,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } v.reset(OpAMD64MOVQstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [32] dst src mem) @@ -37776,17 +35607,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = 16 - 
v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [48] dst src mem) @@ -37807,17 +35634,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = 16 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [64] dst src mem) @@ -37838,17 +35661,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = 32 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = 32 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = 32 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [3] dst src mem) @@ -37862,20 +35681,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -37889,20 +35702,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + 
v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -37916,20 +35723,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -37943,20 +35744,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [9] dst src mem) @@ -37970,20 +35765,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - 
v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [10] dst src mem) @@ -37997,20 +35786,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] dst src mem) @@ -38024,20 +35807,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -38053,20 +35830,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } v.reset(OpAMD64MOVQstore) v.AuxInt = s - 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return 
true } // match: (Move [s] dst src mem) @@ -38085,19 +35856,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -38116,19 +35882,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -38147,28 +35908,20 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) v2.AuxInt = 8 - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v3.AuxInt = 8 - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) + v3.AddArg2(src, mem) v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v4.AddArg(dst) v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v5.AddArg(src) - v5.AddArg(mem) - v4.AddArg(v5) - v4.AddArg(mem) - v2.AddArg(v4) - v.AddArg(v2) + v5.AddArg2(src, mem) + v4.AddArg3(dst, v5, mem) 
+ v2.AddArg3(dst, v3, v4) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -38184,9 +35937,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } v.reset(OpAMD64DUFFCOPY) v.AuxInt = 14 * (64 - s/16) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -38201,12 +35952,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { break } v.reset(OpAMD64REPMOVSQ) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = s / 8 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -38220,10 +35968,9 @@ func rewriteValueAMD64_OpNeg32F(v *Value) bool { for { x := v_0 v.reset(OpAMD64PXOR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1))) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -38236,10 +35983,9 @@ func rewriteValueAMD64_OpNeg64F(v *Value) bool { for { x := v_0 v.reset(OpAMD64PXOR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) v0.AuxInt = auxFrom64F(math.Copysign(0, -1)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -38254,8 +36000,7 @@ func rewriteValueAMD64_OpNeq16(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38271,8 +36016,7 @@ func rewriteValueAMD64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38288,8 +36032,7 @@ func rewriteValueAMD64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38305,8 +36048,7 @@ func rewriteValueAMD64_OpNeq64(v *Value) bool { y := v_1 
v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38322,8 +36064,7 @@ func rewriteValueAMD64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38339,8 +36080,7 @@ func rewriteValueAMD64_OpNeq8(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38356,8 +36096,7 @@ func rewriteValueAMD64_OpNeqB(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38373,8 +36112,7 @@ func rewriteValueAMD64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38417,8 +36155,7 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool { v.reset(OpAMD64ADDQ) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -38439,9 +36176,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } v.reset(OpAMD64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -38457,9 +36192,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } v.reset(OpAMD64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -38475,9 +36208,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } v.reset(OpAMD64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - 
v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -38540,15 +36271,13 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux16 x y) @@ -38561,8 +36290,7 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38583,15 +36311,13 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux32 x y) @@ -38604,8 +36330,7 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38626,15 +36351,13 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux64 x y) @@ -38647,8 +36370,7 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38669,15 +36391,13 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 
:= b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux8 x y) @@ -38690,8 +36410,7 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38712,9 +36431,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -38722,8 +36439,8 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x16 x y) @@ -38736,8 +36453,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38758,9 +36474,7 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -38768,8 +36482,8 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x32 x y) @@ -38782,8 +36496,7 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ 
-38804,9 +36517,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -38814,8 +36525,8 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x64 x y) @@ -38828,8 +36539,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38850,9 +36560,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -38860,8 +36568,8 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x8 x y) @@ -38874,8 +36582,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38896,15 +36603,13 @@ func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux16 x y) @@ -38917,8 +36622,7 @@ func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { break 
} v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38939,15 +36643,13 @@ func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux32 x y) @@ -38960,8 +36662,7 @@ func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38982,15 +36683,13 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux64 x y) @@ -39003,8 +36702,7 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39025,15 +36723,13 @@ func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux8 x y) @@ -39046,8 +36742,7 @@ func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39068,9 +36763,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool 
{ } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -39078,8 +36771,8 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 x y) @@ -39092,8 +36785,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39114,9 +36806,7 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -39124,8 +36814,8 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 x y) @@ -39138,8 +36828,7 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39160,9 +36849,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -39170,8 +36857,8 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x y) @@ -39184,8 
+36871,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39206,9 +36892,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -39216,8 +36900,8 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 x y) @@ -39230,8 +36914,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39252,15 +36935,13 @@ func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux16 x y) @@ -39273,8 +36954,7 @@ func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39295,15 +36975,13 @@ func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux32 x y) @@ -39316,8 +36994,7 @@ func 
rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39338,15 +37015,13 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux64 x y) @@ -39359,8 +37034,7 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39381,15 +37055,13 @@ func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux8 x y) @@ -39402,8 +37074,7 @@ func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39424,9 +37095,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -39434,8 +37103,8 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x16 x y) @@ -39448,8 +37117,7 @@ func rewriteValueAMD64_OpRsh64x16(v 
*Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39470,9 +37138,7 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -39480,8 +37146,8 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x32 x y) @@ -39494,8 +37160,7 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39516,9 +37181,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -39526,8 +37189,8 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x y) @@ -39540,8 +37203,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39562,9 +37224,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -39572,8 +37232,8 @@ func 
rewriteValueAMD64_OpRsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x8 x y) @@ -39586,8 +37246,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39608,15 +37267,13 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux16 x y) @@ -39629,8 +37286,7 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39651,15 +37307,13 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux32 x y) @@ -39672,8 +37326,7 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39694,15 +37347,13 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: 
(Rsh8Ux64 x y) @@ -39715,8 +37366,7 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39737,15 +37387,13 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux8 x y) @@ -39758,8 +37406,7 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39780,9 +37427,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -39790,8 +37435,8 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x16 x y) @@ -39804,8 +37449,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39826,9 +37470,7 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -39836,8 +37478,8 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - 
v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x32 x y) @@ -39850,8 +37492,7 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39872,9 +37513,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -39882,8 +37521,8 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x64 x y) @@ -39896,8 +37535,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39918,9 +37556,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -39928,8 +37564,8 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x8 x y) @@ -39942,8 +37578,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39963,8 +37598,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) + 
v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -39979,8 +37613,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -39996,13 +37629,11 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -40018,13 +37649,11 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -40038,10 +37667,9 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpAMD64ADDL) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } // match: (Select0 (AddTupleFirst64 val tuple)) @@ -40054,10 +37682,9 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpAMD64ADDQ) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } return false @@ -40077,8 +37704,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v.reset(OpAMD64SETO) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, 
OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -40094,8 +37720,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v.reset(OpAMD64SETO) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -40114,13 +37739,11 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v4.AddArg(c) v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -40140,13 +37763,11 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v4.AddArg(c) v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -40180,9 +37801,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { break } x := v_0_0_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Select1 (AddTupleFirst32 _ tuple)) @@ -40241,9 +37860,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVSDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: 
(Store {t} ptr val mem) @@ -40258,9 +37875,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVSSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40275,9 +37890,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40292,9 +37905,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40309,9 +37920,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40326,9 +37935,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -40358,9 +37965,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) @@ -40373,8 +37978,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -40387,8 +37991,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVWstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) @@ -40401,8 +38004,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVLstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + 
v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) @@ -40415,8 +38017,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVQstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -40429,12 +38030,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) @@ -40447,12 +38046,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -40465,12 +38062,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -40483,12 +38078,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(0, 3) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -40506,12 +38099,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, 
destptr.Type) v0.AuxInt = s % 8 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [16] destptr mem) @@ -40528,12 +38119,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [24] destptr mem) @@ -40550,16 +38139,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 16) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 8) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [32] destptr mem) @@ -40576,20 +38162,16 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 24) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 16) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = makeValAndOff(0, 8) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -40604,12 +38186,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool 
{ } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, s-8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -40627,14 +38207,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 16 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [s] destptr mem) @@ -40652,12 +38229,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 16 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [16] destptr mem) @@ -40673,11 +38248,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64MOVOstore) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [32] destptr mem) @@ -40696,17 +38269,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = 16 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v2.AddArg(destptr) v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v3.AuxInt = 0 - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(destptr, v3, mem) + 
v.AddArg3(v0, v1, v2) return true } // match: (Zero [48] destptr mem) @@ -40725,26 +38294,20 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = 32 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v3.AuxInt = 16 v3.AddArg(destptr) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v5.AddArg(destptr) v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v2.AddArg(v5) - v.AddArg(v2) + v5.AddArg3(destptr, v6, mem) + v2.AddArg3(v3, v4, v5) + v.AddArg3(v0, v1, v2) return true } // match: (Zero [64] destptr mem) @@ -40763,35 +38326,27 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = 48 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v3.AuxInt = 32 v3.AddArg(destptr) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v6.AuxInt = 16 v6.AddArg(destptr) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v7.AuxInt = 0 - v5.AddArg(v7) v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v8.AddArg(destptr) v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v9.AuxInt = 0 - v8.AddArg(v9) - v8.AddArg(mem) - v5.AddArg(v8) - v2.AddArg(v5) - v.AddArg(v2) + v8.AddArg3(destptr, v9, mem) + v5.AddArg3(v6, v7, v8) + v2.AddArg3(v3, v4, v5) + 
v.AddArg3(v0, v1, v2) return true } // match: (Zero [s] destptr mem) @@ -40806,11 +38361,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64DUFFZERO) v.AuxInt = s - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [s] destptr mem) @@ -40824,14 +38377,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64REPSTOSQ) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = s / 8 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(destptr, v0, v1, mem) return true } return false @@ -40856,11 +38406,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -40882,11 +38430,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -40901,11 +38447,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint32PowerOfTwo(c)) { break } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } // match: (EQ (TESTQconst [c] x)) @@ -40918,11 +38463,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { break } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } // match: (EQ 
(TESTQ (MOVQconst [c]) x)) @@ -40942,11 +38486,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -40973,11 +38516,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -41004,11 +38546,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -41035,11 +38576,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -41066,11 +38606,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -41093,11 +38632,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -41120,11 +38658,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + 
b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -41134,8 +38671,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQ, cmp) return true } // match: (EQ (FlagEQ) yes no) @@ -41178,8 +38714,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LE, cmp) return true } // match: (GE (FlagEQ) yes no) @@ -41220,8 +38755,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LT, cmp) return true } // match: (GT (FlagEQ) yes no) @@ -41263,8 +38797,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETL { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LT, cmp) return true } // match: (If (SETLE cmp) yes no) @@ -41272,8 +38805,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETLE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LE, cmp) return true } // match: (If (SETG cmp) yes no) @@ -41281,8 +38813,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETG { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GT, cmp) return true } // match: (If (SETGE cmp) yes no) @@ -41290,8 +38821,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETGE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GE, cmp) return true } // match: (If (SETEQ cmp) yes no) @@ 
-41299,8 +38829,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETEQ { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQ, cmp) return true } // match: (If (SETNE cmp) yes no) @@ -41308,8 +38837,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETNE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64NE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NE, cmp) return true } // match: (If (SETB cmp) yes no) @@ -41317,8 +38845,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETB { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULT, cmp) return true } // match: (If (SETBE cmp) yes no) @@ -41326,8 +38853,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETBE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULE, cmp) return true } // match: (If (SETA cmp) yes no) @@ -41335,8 +38861,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETA { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (If (SETAE cmp) yes no) @@ -41344,8 +38869,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETAE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (If (SETO cmp) yes no) @@ -41353,8 +38877,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETO { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64OS) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64OS, cmp) return true } // match: (If (SETGF cmp) yes no) @@ -41362,8 +38885,7 @@ func rewriteBlockAMD64(b *Block) bool { for 
b.Controls[0].Op == OpAMD64SETGF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (If (SETGEF cmp) yes no) @@ -41371,8 +38893,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETGEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (If (SETEQF cmp) yes no) @@ -41380,8 +38901,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETEQF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64EQF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQF, cmp) return true } // match: (If (SETNEF cmp) yes no) @@ -41389,19 +38909,16 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETNEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64NEF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NEF, cmp) return true } // match: (If cond yes no) // result: (NE (TESTB cond cond) yes no) for { cond := b.Controls[0] - b.Reset(BlockAMD64NE) v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags) - v0.AddArg(cond) - v0.AddArg(cond) - b.AddControl(v0) + v0.AddArg2(cond, cond) + b.resetWithControl(BlockAMD64NE, v0) return true } case BlockAMD64LE: @@ -41410,8 +38927,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GE, cmp) return true } // match: (LE (FlagEQ) yes no) @@ -41452,8 +38968,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GT, cmp) return true } // match: (LT (FlagEQ) yes no) @@ -41504,8 +39019,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETL || cmp 
!= v_0_1.Args[0] { break } - b.Reset(BlockAMD64LT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LT, cmp) return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -41522,8 +39036,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64LE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LE, cmp) return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -41540,8 +39053,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64GT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GT, cmp) return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -41558,8 +39070,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64GE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GE, cmp) return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -41576,8 +39087,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQ, cmp) return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -41594,8 +39104,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64NE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NE, cmp) return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -41612,8 +39121,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULT, cmp) return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -41630,8 +39138,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64ULE) - 
b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULE, cmp) return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -41648,8 +39155,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -41666,8 +39172,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) @@ -41684,8 +39189,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64OS) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64OS, cmp) return true } // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) @@ -41705,11 +39209,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41731,11 +39233,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41750,11 +39250,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint32PowerOfTwo(c)) { break } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } // match: (NE (TESTQconst [c] x)) @@ -41767,11 +39266,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { break } - 
b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } // match: (NE (TESTQ (MOVQconst [c]) x)) @@ -41791,11 +39289,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41822,11 +39319,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41853,11 +39349,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41884,11 +39379,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41915,11 +39409,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41942,11 +39435,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41969,11 +39461,10 @@ func 
rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -41992,8 +39483,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) @@ -42010,8 +39500,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -42028,8 +39517,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64EQF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQF, cmp) return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -42046,8 +39534,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64NEF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NEF, cmp) return true } // match: (NE (InvertFlags cmp) yes no) @@ -42055,8 +39542,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64NE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NE, cmp) return true } // match: (NE (FlagEQ) yes no) @@ -42140,8 +39626,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULE, cmp) return true } // match: (UGE (FlagEQ) yes no) @@ -42182,8 +39667,7 @@ func rewriteBlockAMD64(b *Block) bool { for 
b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULT, cmp) return true } // match: (UGT (FlagEQ) yes no) @@ -42225,8 +39709,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (ULE (FlagEQ) yes no) @@ -42315,8 +39798,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (ULT (FlagEQ) yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go index 69df3f7a1e..40a7013744 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go @@ -41,8 +41,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -65,10 +64,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -89,8 +86,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -113,10 +109,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, 
typ.UInt32) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -137,8 +131,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -161,10 +154,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -185,8 +176,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -209,10 +199,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index baa3c66e0f..91ef5fe14f 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -545,6 +545,9 @@ func rewriteValueARM(v *Value) bool { case OpCvt64Fto32U: v.Op = OpARMMOVDWU return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueARM_OpDiv16(v) case OpDiv16u: @@ -915,8 +918,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } break @@ -934,9 +936,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := 
v_2 v.reset(OpARMADCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } break @@ -954,9 +954,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := v_2 v.reset(OpARMADCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } break @@ -974,9 +972,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := v_2 v.reset(OpARMADCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } break @@ -993,10 +989,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMADCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } break @@ -1013,10 +1006,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMADCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } break @@ -1033,10 +1023,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMADCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } break @@ -1058,8 +1045,7 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool { flags := v_1 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c + d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (ADCconst [c] (SUBconst [d] x) flags) @@ -1074,8 +1060,7 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool { flags := v_1 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c - d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1100,8 +1085,7 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (ADCshiftLL x (MOVWconst [c]) 
[d] flags) @@ -1116,8 +1100,7 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(uint32(c) << uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1141,10 +1124,8 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool { v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (ADCshiftLLreg x y (MOVWconst [c]) flags) @@ -1159,9 +1140,7 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool { flags := v_3 v.reset(OpARMADCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -1186,8 +1165,7 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRA x (MOVWconst [c]) [d] flags) @@ -1202,8 +1180,7 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1227,10 +1204,8 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool { v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRAreg x y (MOVWconst [c]) flags) @@ -1245,9 +1220,7 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool { flags := v_3 v.reset(OpARMADCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -1272,8 +1245,7 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) 
v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRL x (MOVWconst [c]) [d] flags) @@ -1288,8 +1260,7 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1313,10 +1284,8 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool { v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRLreg x y (MOVWconst [c]) flags) @@ -1331,9 +1300,7 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool { flags := v_3 v.reset(OpARMADCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -1370,8 +1337,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1388,8 +1354,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1406,8 +1371,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1423,9 +1387,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1441,9 +1403,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1459,9 +1419,7 @@ func rewriteValueARM_OpARMADD(v 
*Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1476,8 +1434,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1500,8 +1457,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c + d v0 := b.NewValue0(v.Pos, OpARMADD, t) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1518,9 +1474,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARMMULA) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) + v.AddArg3(x, y, a) return true } break @@ -1545,9 +1499,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { continue } v.reset(OpARMMULAD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1567,9 +1519,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { continue } v.reset(OpARMMULSD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1594,9 +1544,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { continue } v.reset(OpARMMULAF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1616,9 +1564,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { continue } v.reset(OpARMMULSF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1656,8 +1602,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1674,8 +1619,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1692,8 +1636,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDSshiftRA) v.AuxInt = c - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1709,9 +1652,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1727,9 +1668,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1745,9 +1684,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1808,8 +1745,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool { v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1824,8 +1760,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1884,8 +1819,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool { v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1900,8 +1834,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1960,8 +1893,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool { v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1976,8 +1908,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) 
+ v.AddArg2(x, y) return true } return false @@ -2007,9 +1938,7 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] x) @@ -2200,8 +2129,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool { v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2216,8 +2144,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2276,8 +2203,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool { v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2292,8 +2218,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2368,8 +2293,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2384,8 +2308,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2421,8 +2344,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMANDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2439,8 +2361,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMANDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2457,8 +2378,7 @@ func rewriteValueARM_OpARMAND(v 
*Value) bool { y := v_1.Args[0] v.reset(OpARMANDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2474,9 +2394,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMANDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -2492,9 +2410,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMANDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -2510,9 +2426,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMANDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -2524,9 +2438,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND x (MVN y)) @@ -2539,8 +2451,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMBIC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2557,8 +2468,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2575,8 +2485,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2593,8 +2502,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2622,9 +2530,7 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] x) @@ -2732,9 +2638,7 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value) bool 
{ if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2756,8 +2660,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool { v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2772,8 +2675,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMANDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2827,9 +2729,7 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2851,8 +2751,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool { v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2867,8 +2766,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMANDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2922,9 +2820,7 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2946,8 +2842,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool { v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2962,8 +2857,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMANDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3027,8 +2921,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftLL) v.AuxInt = c - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (BIC x (SRLconst [c] y)) @@ -3042,8 +2935,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (BIC x (SRAconst [c] y)) @@ -3057,8 +2949,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (BIC x (SLL y z)) @@ -3071,9 +2962,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMBICshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (BIC x (SRL y z)) @@ -3086,9 +2975,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMBICshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (BIC x (SRA y z)) @@ -3101,9 +2988,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMBICshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (BIC x x) @@ -3128,9 +3013,7 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (BICconst [c] _) @@ -3252,8 +3135,7 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMBICshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3309,8 +3191,7 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMBICshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3366,8 +3247,7 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMBICshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } return false @@ -3403,8 +3283,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMNshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3421,8 +3300,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMNshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3439,8 +3317,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMNshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3456,9 +3333,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMNshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -3474,9 +3349,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMNshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -3492,9 +3365,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMNshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -3509,8 +3380,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMCMP) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3650,8 +3520,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool { v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3666,8 +3535,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMNshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3726,8 +3594,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool { v.reset(OpARMCMNconst) v.AuxInt = c v0 := 
b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3742,8 +3609,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMNshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3802,8 +3668,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool { v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3818,8 +3683,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMNshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3845,9 +3709,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { if v_1.Op != OpARMFlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWHSconst _ (FlagLT_UGT) [c]) @@ -3868,9 +3730,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { if v_1.Op != OpARMFlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWHSconst _ (FlagGT_UGT) [c]) @@ -3895,8 +3755,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { flags := v_1.Args[0] v.reset(OpARMCMOVWLSconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -3933,9 +3792,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { if v_1.Op != OpARMFlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLSconst _ (FlagGT_ULT) [c]) @@ -3956,9 +3813,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { if v_1.Op != OpARMFlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLSconst x (InvertFlags flags) [c]) @@ -3972,8 +3827,7 @@ func 
rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { flags := v_1.Args[0] v.reset(OpARMCMOVWHSconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -4021,8 +3875,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { } v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4037,8 +3890,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMPshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMP (SLLconst [c] y) x) @@ -4053,8 +3905,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4069,8 +3920,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMPshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMP (SRLconst [c] y) x) @@ -4085,8 +3935,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4101,8 +3950,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMPshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMP (SRAconst [c] y) x) @@ -4117,8 +3965,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4132,9 +3979,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMPshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, 
z) return true } // match: (CMP (SLL y z) x) @@ -4148,9 +3993,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) v.AddArg(v0) return true } @@ -4164,9 +4007,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMPshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (CMP (SRL y z) x) @@ -4180,9 +4021,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) v.AddArg(v0) return true } @@ -4196,9 +4035,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMPshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (CMP (SRA y z) x) @@ -4212,9 +4049,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) v.AddArg(v0) return true } @@ -4227,8 +4062,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMCMN) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4453,8 +4287,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -4470,8 +4303,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMPshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4533,8 +4365,7 @@ func 
rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -4550,8 +4381,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMPshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4613,8 +4443,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -4630,8 +4459,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMPshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -5238,8 +5066,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -5256,8 +5083,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { v.reset(OpARMMOVBUload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -5279,8 +5105,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -5295,9 +5120,8 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && 
isSamePtr(ptr, ptr2)) { break } @@ -5323,9 +5147,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { break } v.reset(OpARMMOVBUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBUload [off] {sym} (SB) _) @@ -5356,13 +5178,9 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { if v_2.Op != OpARMMOVBstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVBUreg) @@ -5380,8 +5198,7 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) @@ -5395,8 +5212,7 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5410,7 +5226,6 @@ func rewriteValueARM_OpARMMOVBUreg(v *Value) bool { if x.Op != OpARMMOVBUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -5469,8 +5284,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { v.reset(OpARMMOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -5487,8 +5301,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { v.reset(OpARMMOVBload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -5510,8 +5323,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { v.reset(OpARMMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // 
match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -5526,9 +5338,8 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -5554,9 +5365,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { break } v.reset(OpARMMOVBloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -5574,13 +5383,9 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { if v_2.Op != OpARMMOVBstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVBreg) @@ -5598,8 +5403,7 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx (MOVWconst [c]) ptr mem) @@ -5613,8 +5417,7 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5628,7 +5431,6 @@ func rewriteValueARM_OpARMMOVBreg(v *Value) bool { if x.Op != OpARMMOVBload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -5693,9 +5495,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -5713,9 +5513,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) 
return true } // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -5738,9 +5536,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -5757,9 +5553,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -5776,9 +5570,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -5795,9 +5587,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -5814,9 +5604,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem) @@ -5838,10 +5626,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { break } v.reset(OpARMMOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -5863,9 +5648,7 @@ func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) @@ -5880,9 +5663,7 @@ func 
rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -5904,8 +5685,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { v.reset(OpARMMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -5922,8 +5702,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { v.reset(OpARMMOVDload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -5945,8 +5724,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { v.reset(OpARMMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) @@ -5961,15 +5739,12 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5993,9 +5768,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { v.reset(OpARMMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -6013,9 +5786,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { v.reset(OpARMMOVDstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -6038,9 +5809,7 @@ func 
rewriteValueARM_OpARMMOVDstore(v *Value) bool { v.reset(OpARMMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -6062,8 +5831,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6080,8 +5848,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { v.reset(OpARMMOVFload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6103,8 +5870,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) @@ -6119,15 +5885,12 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6151,9 +5914,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { v.reset(OpARMMOVFstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -6171,9 +5932,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { v.reset(OpARMMOVFstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -6196,9 +5955,7 @@ 
func rewriteValueARM_OpARMMOVFstore(v *Value) bool { v.reset(OpARMMOVFstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -6222,8 +5979,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { v.reset(OpARMMOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6240,8 +5996,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { v.reset(OpARMMOVHUload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6263,8 +6018,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { v.reset(OpARMMOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -6279,9 +6033,8 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6307,9 +6060,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { break } v.reset(OpARMMOVHUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off] {sym} (SB) _) @@ -6340,13 +6091,9 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { if v_2.Op != OpARMMOVHstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVHUreg) @@ -6364,8 +6111,7 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { mem := v_2 
v.reset(OpARMMOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) @@ -6379,8 +6125,7 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6394,7 +6139,6 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value) bool { if x.Op != OpARMMOVBUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6406,7 +6150,6 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value) bool { if x.Op != OpARMMOVHUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6476,8 +6219,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6494,8 +6236,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { v.reset(OpARMMOVHload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6517,8 +6258,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -6533,9 +6273,8 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6561,9 +6300,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { break } v.reset(OpARMMOVHloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ 
-6581,13 +6318,9 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool { if v_2.Op != OpARMMOVHstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVHreg) @@ -6605,8 +6338,7 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVWconst [c]) ptr mem) @@ -6620,8 +6352,7 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6635,7 +6366,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { if x.Op != OpARMMOVBload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6647,7 +6377,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { if x.Op != OpARMMOVBUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6659,7 +6388,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { if x.Op != OpARMMOVHload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6746,9 +6474,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -6766,9 +6492,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -6791,9 +6515,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = 
off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -6810,9 +6532,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -6829,9 +6549,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem) @@ -6853,10 +6571,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { break } v.reset(OpARMMOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -6878,9 +6593,7 @@ func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) @@ -6895,9 +6608,7 @@ func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -6921,8 +6632,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { v.reset(OpARMMOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6939,8 +6649,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { v.reset(OpARMMOVWload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6962,8 +6671,7 @@ func 
rewriteValueARM_OpARMMOVWload(v *Value) bool { v.reset(OpARMMOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -6978,15 +6686,12 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWload [0] {sym} (ADD ptr idx) mem) @@ -7007,9 +6712,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { break } v.reset(OpARMMOVWloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) @@ -7032,9 +6735,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) @@ -7057,9 +6758,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) @@ -7082,9 +6781,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (SB) _) @@ -7115,18 +6812,12 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { if v_2.Op != OpARMMOVWstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || 
!(isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadidx ptr (MOVWconst [c]) mem) @@ -7140,8 +6831,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx (MOVWconst [c]) ptr mem) @@ -7155,8 +6845,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx ptr (SLLconst idx [c]) mem) @@ -7171,9 +6860,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SLLconst idx [c]) ptr mem) @@ -7188,9 +6875,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx ptr (SRLconst idx [c]) mem) @@ -7205,9 +6890,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SRLconst idx [c]) ptr mem) @@ -7222,9 +6905,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx ptr (SRAconst idx [c]) mem) @@ -7239,9 +6920,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SRAconst idx [c]) ptr mem) 
@@ -7256,9 +6935,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7278,18 +6955,12 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool { break } d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) @@ -7304,8 +6975,7 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(uint32(c) << uint64(d)) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7325,18 +6995,12 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool { break } d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) @@ -7351,8 +7015,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7372,18 +7035,12 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool { break } d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) 
- v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) @@ -7398,8 +7055,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(uint32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7450,9 +7106,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { v.reset(OpARMMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -7470,9 +7124,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { v.reset(OpARMMOVWstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -7495,9 +7147,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { v.reset(OpARMMOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem) @@ -7519,10 +7169,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { break } v.reset(OpARMMOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) @@ -7546,10 +7193,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) @@ -7573,10 +7217,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) 
+ v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) @@ -7600,10 +7241,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7625,9 +7263,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem) @@ -7642,9 +7278,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem) @@ -7660,10 +7294,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem) @@ -7679,10 +7310,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem) @@ -7698,10 +7326,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem) @@ -7717,10 +7342,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + 
v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem) @@ -7736,10 +7358,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem) @@ -7755,10 +7374,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7781,9 +7397,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(uint32(c) << uint64(d)) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7806,9 +7420,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7831,9 +7443,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(uint32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7883,9 +7493,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -7925,8 +7533,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { } v.reset(OpARMADDshiftLL) v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -7946,8 +7553,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { } v.reset(OpARMRSBshiftLL) v.AuxInt = log2(c + 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) 
return true } break @@ -7969,8 +7575,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -7993,8 +7598,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -8017,8 +7621,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 7) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -8041,8 +7644,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -8087,8 +7689,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } v.reset(OpARMSUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULA _ (MOVWconst [0]) a) @@ -8098,9 +7699,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULA x (MOVWconst [1]) a) @@ -8112,8 +7711,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { } a := v_2 v.reset(OpARMADD) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8133,8 +7731,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8153,10 +7750,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - 
v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8175,10 +7770,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8199,11 +7792,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8224,11 +7815,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8249,11 +7838,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8274,11 +7861,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8295,8 +7880,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } v.reset(OpARMSUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULA (MOVWconst [0]) _ a) @@ -8306,9 +7890,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - 
v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULA (MOVWconst [1]) x a) @@ -8320,8 +7902,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { x := v_1 a := v_2 v.reset(OpARMADD) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8341,8 +7922,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8361,10 +7941,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8383,10 +7961,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8407,11 +7983,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8432,11 +8006,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8457,11 +8029,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - 
v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8482,11 +8052,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a) @@ -8525,8 +8093,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool { continue } v.reset(OpARMNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -8550,8 +8117,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool { continue } v.reset(OpARMNMULF) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -8577,8 +8143,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } v.reset(OpARMADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULS _ (MOVWconst [0]) a) @@ -8588,9 +8153,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULS x (MOVWconst [1]) a) @@ -8602,8 +8165,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { } a := v_2 v.reset(OpARMRSB) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8623,8 +8185,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8643,10 +8204,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8665,10 +8224,8 @@ func 
rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8689,11 +8246,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8714,11 +8269,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8739,11 +8292,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8764,11 +8315,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8785,8 +8334,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } v.reset(OpARMADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULS (MOVWconst [0]) _ a) @@ -8796,9 +8344,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULS (MOVWconst [1]) x a) @@ -8810,8 +8356,7 @@ func 
rewriteValueARM_OpARMMULS(v *Value) bool { x := v_1 a := v_2 v.reset(OpARMRSB) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8831,8 +8376,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8851,10 +8395,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8873,10 +8415,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8897,11 +8437,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8922,11 +8460,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8947,11 +8483,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS 
(MOVWconst [c]) x a) @@ -8972,11 +8506,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a) @@ -9059,8 +8591,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARMMVNshiftLLreg) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MVN (SRL x y)) @@ -9072,8 +8603,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARMMVNshiftRLreg) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MVN (SRA x y)) @@ -9085,8 +8615,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARMMVNshiftRAreg) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9208,8 +8737,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool { break } v.reset(OpARMNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9229,8 +8757,7 @@ func rewriteValueARM_OpARMNEGF(v *Value) bool { break } v.reset(OpARMNMULF) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9248,8 +8775,7 @@ func rewriteValueARM_OpARMNMULD(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARMMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9269,8 +8795,7 @@ func rewriteValueARM_OpARMNMULF(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARMMULF) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9373,8 +8898,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9391,8 +8915,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMORshiftRL) 
v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9409,8 +8932,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9426,9 +8948,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -9444,9 +8964,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -9462,9 +8980,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -9476,9 +8992,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -9492,9 +9006,7 @@ func rewriteValueARM_OpARMORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [c] _) @@ -9636,9 +9148,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -9660,8 +9170,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool { v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -9676,8 +9185,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9731,9 +9239,7 @@ func rewriteValueARM_OpARMORshiftRA(v *Value) bool { if 
x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -9755,8 +9261,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool { v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -9771,8 +9276,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9842,9 +9346,7 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -9866,8 +9368,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool { v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -9882,8 +9383,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9928,8 +9428,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMRSBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB (SLLconst [c] y) x) @@ -9943,8 +9442,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { x := v_1 v.reset(OpARMSUBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB x (SRLconst [c] y)) @@ -9958,8 +9456,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMRSBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB (SRLconst [c] y) x) @@ -9973,8 +9470,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { x := v_1 v.reset(OpARMSUBshiftRL) v.AuxInt = c - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB x (SRAconst [c] y)) @@ -9988,8 +9484,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMRSBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB (SRAconst [c] y) x) @@ -10003,8 +9498,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { x := v_1 v.reset(OpARMSUBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB x (SLL y z)) @@ -10017,9 +9511,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMRSBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB (SLL y z) x) @@ -10032,9 +9524,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMSUBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB x (SRL y z)) @@ -10047,9 +9537,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMRSBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB (SRL y z) x) @@ -10062,9 +9550,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMSUBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB x (SRA y z)) @@ -10077,9 +9563,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMRSBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB (SRA y z) x) @@ -10092,9 +9576,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMSUBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB x x) @@ -10122,9 +9604,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { break } v.reset(OpARMMULS) - 
v.AddArg(x) - v.AddArg(y) - v.AddArg(a) + v.AddArg3(x, y, a) return true } return false @@ -10183,8 +9663,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool { v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10199,8 +9678,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10259,8 +9737,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool { v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10275,8 +9752,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10335,8 +9811,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool { v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10351,8 +9826,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10486,8 +9960,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool { v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10502,8 +9975,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10579,8 +10051,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool { v.reset(OpARMSUBconst) v.AuxInt = c v0 := 
b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10595,8 +10066,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10672,8 +10142,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool { v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10688,8 +10157,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10709,8 +10177,7 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool { flags := v_1 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c - d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (RSCconst [c] (SUBconst [d] x) flags) @@ -10725,8 +10192,7 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool { flags := v_1 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c + d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10751,8 +10217,7 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (RSCshiftLL x (MOVWconst [c]) [d] flags) @@ -10767,8 +10232,7 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(uint32(c) << uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10792,10 +10256,8 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool { v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + 
v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (RSCshiftLLreg x y (MOVWconst [c]) flags) @@ -10810,9 +10272,7 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool { flags := v_3 v.reset(OpARMRSCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -10837,8 +10297,7 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRA x (MOVWconst [c]) [d] flags) @@ -10853,8 +10312,7 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10878,10 +10336,8 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool { v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRAreg x y (MOVWconst [c]) flags) @@ -10896,9 +10352,7 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool { flags := v_3 v.reset(OpARMRSCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -10923,8 +10377,7 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRL x (MOVWconst [c]) [d] flags) @@ -10939,8 +10392,7 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10964,10 +10416,8 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) 
bool { v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRLreg x y (MOVWconst [c]) flags) @@ -10982,9 +10432,7 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool { flags := v_3 v.reset(OpARMRSCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11004,8 +10452,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (SBC x (MOVWconst [c]) flags) @@ -11019,8 +10466,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (SBC x (SLLconst [c] y) flags) @@ -11035,9 +10481,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC (SLLconst [c] y) x flags) @@ -11052,9 +10496,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC x (SRLconst [c] y) flags) @@ -11069,9 +10511,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC (SRLconst [c] y) x flags) @@ -11086,9 +10526,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC x (SRAconst [c] y) flags) @@ -11103,9 +10541,7 @@ func rewriteValueARM_OpARMSBC(v *Value) 
bool { flags := v_2 v.reset(OpARMSBCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC (SRAconst [c] y) x flags) @@ -11120,9 +10556,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC x (SLL y z) flags) @@ -11136,10 +10570,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMSBCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC (SLL y z) x flags) @@ -11153,10 +10584,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { x := v_1 flags := v_2 v.reset(OpARMRSCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC x (SRL y z) flags) @@ -11170,10 +10598,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMSBCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC (SRL y z) x flags) @@ -11187,10 +10612,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { x := v_1 flags := v_2 v.reset(OpARMRSCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC x (SRA y z) flags) @@ -11204,10 +10626,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMSBCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC (SRA y z) x flags) @@ -11221,10 +10640,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { x := v_1 flags := v_2 v.reset(OpARMRSCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } return false @@ -11244,8 +10660,7 @@ 
func rewriteValueARM_OpARMSBCconst(v *Value) bool { flags := v_1 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c - d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (SBCconst [c] (SUBconst [d] x) flags) @@ -11260,8 +10675,7 @@ func rewriteValueARM_OpARMSBCconst(v *Value) bool { flags := v_1 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c + d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11286,8 +10700,7 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (SBCshiftLL x (MOVWconst [c]) [d] flags) @@ -11302,8 +10715,7 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(uint32(c) << uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11327,10 +10739,8 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool { v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (SBCshiftLLreg x y (MOVWconst [c]) flags) @@ -11345,9 +10755,7 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool { flags := v_3 v.reset(OpARMSBCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11372,8 +10780,7 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRA x (MOVWconst [c]) [d] flags) @@ -11388,8 +10795,7 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(x) - v.AddArg(flags) + 
v.AddArg2(x, flags) return true } return false @@ -11413,10 +10819,8 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool { v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRAreg x y (MOVWconst [c]) flags) @@ -11431,9 +10835,7 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool { flags := v_3 v.reset(OpARMSBCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11458,8 +10860,7 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRL x (MOVWconst [c]) [d] flags) @@ -11474,8 +10875,7 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11499,10 +10899,8 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool { v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRLreg x y (MOVWconst [c]) flags) @@ -11517,9 +10915,7 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool { flags := v_3 v.reset(OpARMSBCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11601,8 +10997,7 @@ func rewriteValueARM_OpARMSRAcond(v *Value) bool { break } v.reset(OpARMSRA) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAcond x _ (FlagLT_UGT)) @@ -11626,8 +11021,7 @@ func rewriteValueARM_OpARMSRAcond(v *Value) bool { break } v.reset(OpARMSRA) - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAcond x _ (FlagGT_UGT)) @@ -11770,8 +11164,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB (SLLconst [c] y) x) @@ -11785,8 +11178,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { x := v_1 v.reset(OpARMRSBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB x (SRLconst [c] y)) @@ -11800,8 +11192,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB (SRLconst [c] y) x) @@ -11815,8 +11206,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { x := v_1 v.reset(OpARMRSBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB x (SRAconst [c] y)) @@ -11830,8 +11220,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB (SRAconst [c] y) x) @@ -11845,8 +11234,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { x := v_1 v.reset(OpARMRSBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB x (SLL y z)) @@ -11859,9 +11247,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB (SLL y z) x) @@ -11874,9 +11260,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB x (SRL y z)) @@ -11889,9 +11273,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBshiftRLreg) - 
v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB (SRL y z) x) @@ -11904,9 +11286,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB x (SRA y z)) @@ -11919,9 +11299,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB (SRA y z) x) @@ -11934,9 +11312,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB x x) @@ -11964,9 +11340,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { break } v.reset(OpARMMULS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) + v.AddArg3(x, y, a) return true } return false @@ -11988,9 +11362,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { break } v.reset(OpARMMULSD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUBD a (NMULD x y)) @@ -12007,9 +11379,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { break } v.reset(OpARMMULAD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -12031,9 +11401,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { break } v.reset(OpARMMULSF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUBF a (NMULF x y)) @@ -12050,9 +11418,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { break } v.reset(OpARMMULAF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -12084,8 +11450,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS (SLLconst [c] y) 
x) @@ -12099,8 +11464,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { x := v_1 v.reset(OpARMRSBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS x (SRLconst [c] y)) @@ -12114,8 +11478,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS (SRLconst [c] y) x) @@ -12129,8 +11492,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { x := v_1 v.reset(OpARMRSBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS x (SRAconst [c] y)) @@ -12144,8 +11506,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS (SRAconst [c] y) x) @@ -12159,8 +11520,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { x := v_1 v.reset(OpARMRSBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS x (SLL y z)) @@ -12173,9 +11533,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS (SLL y z) x) @@ -12188,9 +11546,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS x (SRL y z)) @@ -12203,9 +11559,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS (SRL y z) x) @@ -12218,9 +11572,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) 
return true } // match: (SUBS x (SRA y z)) @@ -12233,9 +11585,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS (SRA y z) x) @@ -12248,9 +11598,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -12309,8 +11657,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool { v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12325,8 +11672,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12385,8 +11731,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool { v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12401,8 +11746,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12461,8 +11805,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool { v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12477,8 +11820,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12508,9 +11850,7 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + 
v.copyOf(x) return true } // match: (SUBconst [c] x) @@ -12668,8 +12008,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12684,8 +12023,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12761,8 +12099,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12777,8 +12114,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12854,8 +12190,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12870,8 +12205,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12907,8 +12241,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTEQshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12925,8 +12258,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTEQshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12943,8 +12275,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTEQshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12960,9 +12291,7 @@ func 
rewriteValueARM_OpARMTEQ(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTEQshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -12978,9 +12307,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTEQshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -12996,9 +12323,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTEQshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13108,8 +12433,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool { v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13124,8 +12448,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTEQshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13184,8 +12507,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool { v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13200,8 +12522,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTEQshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13260,8 +12581,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool { v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13276,8 +12596,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTEQshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13313,8 +12632,7 @@ func 
rewriteValueARM_OpARMTST(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTSTshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13331,8 +12649,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTSTshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13349,8 +12666,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTSTshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13366,9 +12682,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTSTshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13384,9 +12698,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTSTshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13402,9 +12714,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTSTshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13514,8 +12824,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool { v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13530,8 +12839,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTSTshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13590,8 +12898,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool { v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13606,8 +12913,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTSTshiftRA) 
v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13666,8 +12972,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool { v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13682,8 +12987,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTSTshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13719,8 +13023,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13737,8 +13040,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13755,8 +13057,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13773,8 +13074,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftRR) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13790,9 +13090,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMXORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13808,9 +13106,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMXORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13826,9 +13122,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMXORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13855,9 +13149,7 @@ func 
rewriteValueARM_OpARMXORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [c] (MOVWconst [d])) @@ -14009,8 +13301,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14025,8 +13316,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMXORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14102,8 +13392,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14118,8 +13407,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMXORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14211,8 +13499,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14227,8 +13514,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMXORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14284,11 +13570,9 @@ func rewriteValueARM_OpAvg32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpARMSUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -14327,18 +13611,16 @@ func rewriteValueARM_OpBswap32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARMBICconst, t) v1.AuxInt = 0xff0000 v2 := b.NewValue0(v.Pos, OpARMXOR, t) - v2.AddArg(x) 
v3 := b.NewValue0(v.Pos, OpARMSRRconst, t) v3.AuxInt = 16 v3.AddArg(x) - v2.AddArg(v3) + v2.AddArg2(x, v3) v1.AddArg(v2) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpARMSRRconst, t) v4.AuxInt = 8 v4.AddArg(x) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } // match: (Bswap32 x) @@ -14386,14 +13668,13 @@ func rewriteValueARM_OpCtz16(v *Value) bool { v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v3.AuxInt = 0x10000 v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) v4.AuxInt = 0 v5 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v5.AuxInt = 0x10000 v5.AddArg(x) v4.AddArg(v5) - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -14438,11 +13719,10 @@ func rewriteValueARM_OpCtz32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARMSUBconst, t) v1.AuxInt = 1 v2 := b.NewValue0(v.Pos, OpARMAND, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMRSBconst, t) v3.AuxInt = 0 v3.AddArg(x) - v2.AddArg(v3) + v2.AddArg2(x, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -14488,14 +13768,13 @@ func rewriteValueARM_OpCtz8(v *Value) bool { v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v3.AuxInt = 0x100 v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) v4.AuxInt = 0 v5 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v5.AuxInt = 0x100 v5.AddArg(x) v4.AddArg(v5) - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -14535,10 +13814,9 @@ func rewriteValueARM_OpDiv16(v *Value) bool { v.reset(OpDiv32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14555,10 +13833,9 @@ func rewriteValueARM_OpDiv16u(v *Value) bool { v.reset(OpDiv32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, 
v1) return true } } @@ -14578,41 +13855,32 @@ func rewriteValueARM_OpDiv32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v5.AddArg(x) - v4.AddArg(v5) - v3.AddArg(v4) + v4.AddArg2(x, v5) v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v6.AddArg(x) - v3.AddArg(v6) - v2.AddArg(v3) + v3.AddArg2(v4, v6) v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v8.AddArg(y) v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v9.AddArg(y) - v8.AddArg(v9) - v7.AddArg(v8) + v8.AddArg2(y, v9) v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v10.AddArg(y) - v7.AddArg(v10) - v2.AddArg(v7) + v7.AddArg2(v8, v10) + v2.AddArg2(v3, v7) v1.AddArg(v2) - v0.AddArg(v1) v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v12 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v12.AddArg(x) - v12.AddArg(y) + v12.AddArg2(x, y) v11.AddArg(v12) - v0.AddArg(v11) - v.AddArg(v0) + v0.AddArg2(v1, v11) v13 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v14 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v14.AddArg(x) - v14.AddArg(y) + v14.AddArg2(x, y) v13.AddArg(v14) - v.AddArg(v13) + v.AddArg2(v0, v13) return true } } @@ -14629,8 +13897,7 @@ func rewriteValueARM_OpDiv32u(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14648,10 +13915,9 @@ func rewriteValueARM_OpDiv8(v *Value) bool { v.reset(OpDiv32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14668,10 +13934,9 @@ func rewriteValueARM_OpDiv8u(v *Value) bool { v.reset(OpDiv32u) v0 := b.NewValue0(v.Pos, 
OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14689,10 +13954,9 @@ func rewriteValueARM_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14708,8 +13972,7 @@ func rewriteValueARM_OpEq32(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14725,8 +13988,7 @@ func rewriteValueARM_OpEq32F(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14742,8 +14004,7 @@ func rewriteValueARM_OpEq64F(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14762,10 +14023,9 @@ func rewriteValueARM_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14783,8 +14043,7 @@ func rewriteValueARM_OpEqB(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14800,8 +14059,7 @@ func rewriteValueARM_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14817,9 +14075,7 @@ 
func rewriteValueARM_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpARMFMULAD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -14834,8 +14090,7 @@ func rewriteValueARM_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14851,8 +14106,7 @@ func rewriteValueARM_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14868,8 +14122,7 @@ func rewriteValueARM_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14885,8 +14138,7 @@ func rewriteValueARM_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14902,8 +14154,7 @@ func rewriteValueARM_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -14934,8 +14185,7 @@ func rewriteValueARM_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -14954,10 +14204,9 @@ func rewriteValueARM_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14976,10 +14225,9 @@ func 
rewriteValueARM_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14995,8 +14243,7 @@ func rewriteValueARM_OpLeq32(v *Value) bool { y := v_1 v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15012,8 +14259,7 @@ func rewriteValueARM_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15029,8 +14275,7 @@ func rewriteValueARM_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15046,8 +14291,7 @@ func rewriteValueARM_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15066,10 +14310,9 @@ func rewriteValueARM_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15088,10 +14331,9 @@ func rewriteValueARM_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15110,10 +14352,9 @@ func rewriteValueARM_OpLess16(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15132,10 +14373,9 @@ func rewriteValueARM_OpLess16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15151,8 +14391,7 @@ func rewriteValueARM_OpLess32(v *Value) bool { y := v_1 v.reset(OpARMLessThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15168,8 +14407,7 @@ func rewriteValueARM_OpLess32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15185,8 +14423,7 @@ func rewriteValueARM_OpLess32U(v *Value) bool { y := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15202,8 +14439,7 @@ func rewriteValueARM_OpLess64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15222,10 +14458,9 @@ func rewriteValueARM_OpLess8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15244,10 +14479,9 @@ func rewriteValueARM_OpLess8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := 
b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15266,8 +14500,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15281,8 +14514,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15296,8 +14528,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15311,8 +14542,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15326,8 +14556,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15341,8 +14570,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15356,8 +14584,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15371,8 +14598,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -15403,17 +14629,15 @@ func rewriteValueARM_OpLsh16x16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - 
v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -15429,13 +14653,11 @@ func rewriteValueARM_OpLsh16x32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15487,10 +14709,9 @@ func rewriteValueARM_OpLsh16x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -15507,17 +14728,15 @@ func rewriteValueARM_OpLsh32x16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -15533,13 +14752,11 @@ func rewriteValueARM_OpLsh32x32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15591,10 +14808,9 @@ func rewriteValueARM_OpLsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -15611,17 +14827,15 @@ func rewriteValueARM_OpLsh8x16(v *Value) 
bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -15637,13 +14851,11 @@ func rewriteValueARM_OpLsh8x32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15695,10 +14907,9 @@ func rewriteValueARM_OpLsh8x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -15715,10 +14926,9 @@ func rewriteValueARM_OpMod16(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15735,10 +14945,9 @@ func rewriteValueARM_OpMod16u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15758,35 +14967,28 @@ func rewriteValueARM_OpMod32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v5.AddArg(x) - v4.AddArg(v5) - v3.AddArg(v4) + v4.AddArg2(x, v5) v6 := b.NewValue0(v.Pos, 
OpSignmask, typ.Int32) v6.AddArg(x) - v3.AddArg(v6) - v2.AddArg(v3) + v3.AddArg2(v4, v6) v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v8.AddArg(y) v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v9.AddArg(y) - v8.AddArg(v9) - v7.AddArg(v8) + v8.AddArg2(y, v9) v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v10.AddArg(y) - v7.AddArg(v10) - v2.AddArg(v7) + v7.AddArg2(v8, v10) + v2.AddArg2(v3, v7) v1.AddArg(v2) - v0.AddArg(v1) v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v11.AddArg(x) - v0.AddArg(v11) - v.AddArg(v0) + v0.AddArg2(v1, v11) v12 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v12.AddArg(x) - v.AddArg(v12) + v.AddArg2(v0, v12) return true } } @@ -15803,8 +15005,7 @@ func rewriteValueARM_OpMod32u(v *Value) bool { v.reset(OpSelect1) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15822,10 +15023,9 @@ func rewriteValueARM_OpMod8(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15842,10 +15042,9 @@ func rewriteValueARM_OpMod8u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15863,9 +15062,7 @@ func rewriteValueARM_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -15878,12 +15075,9 @@ func rewriteValueARM_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARMMOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - 
v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] {t} dst src mem) @@ -15901,12 +15095,9 @@ func rewriteValueARM_OpMove(v *Value) bool { break } v.reset(OpARMMOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -15920,20 +15111,14 @@ func rewriteValueARM_OpMove(v *Value) bool { mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 1 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] {t} dst src mem) @@ -15951,12 +15136,9 @@ func rewriteValueARM_OpMove(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] {t} dst src mem) @@ -15975,20 +15157,14 @@ func rewriteValueARM_OpMove(v *Value) bool { } v.reset(OpARMMOVHstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] dst src mem) @@ -16002,38 
+15178,26 @@ func rewriteValueARM_OpMove(v *Value) bool { mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -16047,29 +15211,20 @@ func rewriteValueARM_OpMove(v *Value) bool { mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] {t} dst src mem) 
@@ -16086,9 +15241,7 @@ func rewriteValueARM_OpMove(v *Value) bool { } v.reset(OpARMDUFFCOPY) v.AuxInt = 8 * (128 - s/4) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] {t} dst src mem) @@ -16105,13 +15258,10 @@ func rewriteValueARM_OpMove(v *Value) bool { } v.reset(OpARMLoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -16166,10 +15316,9 @@ func rewriteValueARM_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -16185,8 +15334,7 @@ func rewriteValueARM_OpNeq32(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16202,8 +15350,7 @@ func rewriteValueARM_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16219,8 +15366,7 @@ func rewriteValueARM_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16239,10 +15385,9 @@ func rewriteValueARM_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } 
@@ -16258,8 +15403,7 @@ func rewriteValueARM_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16319,9 +15463,7 @@ func rewriteValueARM_OpPanicBounds(v *Value) bool { } v.reset(OpARMLoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -16337,9 +15479,7 @@ func rewriteValueARM_OpPanicBounds(v *Value) bool { } v.reset(OpARMLoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -16355,9 +15495,7 @@ func rewriteValueARM_OpPanicBounds(v *Value) bool { } v.reset(OpARMLoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -16381,10 +15519,7 @@ func rewriteValueARM_OpPanicExtend(v *Value) bool { } v.reset(OpARMLoweredPanicExtendA) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -16401,10 +15536,7 @@ func rewriteValueARM_OpPanicExtend(v *Value) bool { } v.reset(OpARMLoweredPanicExtendB) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -16421,10 +15553,7 @@ func rewriteValueARM_OpPanicExtend(v *Value) bool { } v.reset(OpARMLoweredPanicExtendC) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } return false @@ -16445,17 +15574,14 @@ func rewriteValueARM_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v1.AuxInt = c & 15 - v0.AddArg(v1) - 
v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -16483,11 +15609,10 @@ func rewriteValueARM_OpRotateLeft32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type) v0.AuxInt = 0 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -16507,17 +15632,14 @@ func rewriteValueARM_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -16537,17 +15659,15 @@ func rewriteValueARM_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v3.AuxInt = 256 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -16566,13 +15686,11 @@ func rewriteValueARM_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v2.AddArg(y) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -16631,10 +15749,9 @@ func rewriteValueARM_OpRsh16Ux8(v 
*Value) bool { v.reset(OpARMSRL) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -16651,16 +15768,14 @@ func rewriteValueARM_OpRsh16x16(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -16677,12 +15792,10 @@ func rewriteValueARM_OpRsh16x32(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg3(v0, y, v1) return true } } @@ -16746,10 +15859,9 @@ func rewriteValueARM_OpRsh16x8(v *Value) bool { v.reset(OpARMSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -16766,17 +15878,15 @@ func rewriteValueARM_OpRsh32Ux16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -16792,13 +15902,11 @@ func rewriteValueARM_OpRsh32Ux32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - 
v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -16850,10 +15958,9 @@ func rewriteValueARM_OpRsh32Ux8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -16868,16 +15975,14 @@ func rewriteValueARM_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRAcond) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg3(x, v0, v1) return true } } @@ -16891,12 +15996,10 @@ func rewriteValueARM_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRAcond) - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = 256 v0.AddArg(y) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } } @@ -16950,10 +16053,9 @@ func rewriteValueARM_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -16972,17 +16074,15 @@ func rewriteValueARM_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v3.AuxInt = 256 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -17001,13 +16101,11 @@ func rewriteValueARM_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, 
x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v2.AddArg(y) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -17066,10 +16164,9 @@ func rewriteValueARM_OpRsh8Ux8(v *Value) bool { v.reset(OpARMSRL) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -17086,16 +16183,14 @@ func rewriteValueARM_OpRsh8x16(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -17112,12 +16207,10 @@ func rewriteValueARM_OpRsh8x32(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg3(v0, y, v1) return true } } @@ -17181,10 +16274,9 @@ func rewriteValueARM_OpRsh8x8(v *Value) bool { v.reset(OpARMSRA) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -17202,9 +16294,7 @@ func rewriteValueARM_OpSelect0(v *Value) bool { if v_0_1.Op != OpARMMOVWconst || v_0_1.AuxInt != 1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Select0 (CALLudiv x (MOVWconst [c]))) @@ -17359,9 +16449,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } 
v.reset(OpARMMOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17376,9 +16464,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17393,9 +16479,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17410,9 +16494,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17427,9 +16509,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -17447,9 +16527,7 @@ func rewriteValueARM_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) @@ -17461,11 +16539,9 @@ func rewriteValueARM_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARMMOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) @@ -17482,11 +16558,9 @@ func rewriteValueARM_OpZero(v *Value) bool { break } v.reset(OpARMMOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -17499,18 +16573,14 @@ func rewriteValueARM_OpZero(v *Value) bool { mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 1 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, 
typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) @@ -17527,11 +16597,9 @@ func rewriteValueARM_OpZero(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) @@ -17549,18 +16617,14 @@ func rewriteValueARM_OpZero(v *Value) bool { } v.reset(OpARMMOVHstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) @@ -17573,32 +16637,24 @@ func rewriteValueARM_OpZero(v *Value) bool { mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 3 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, 
v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [3] ptr mem) @@ -17611,25 +16667,19 @@ func rewriteValueARM_OpZero(v *Value) bool { mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) @@ -17645,11 +16695,9 @@ func rewriteValueARM_OpZero(v *Value) bool { } v.reset(OpARMDUFFZERO) v.AuxInt = 4 * (128 - s/4) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [s] {t} ptr mem) @@ -17665,15 +16713,12 @@ func rewriteValueARM_OpZero(v *Value) bool { } v.reset(OpARMLoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(ptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(ptr, v0, v1, mem) return true } return false @@ -17690,8 +16735,7 @@ func rewriteValueARM_OpZeromask(v *Value) bool { v.AuxInt = 31 v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -17738,8 +16782,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMEQ) - b.AddControl(cmp) + 
b.resetWithControl(BlockARMEQ, cmp) return true } // match: (EQ (CMPconst [0] l:(SUB x y)) yes no) @@ -17759,11 +16802,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no) @@ -17784,14 +16825,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -17811,11 +16849,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -17836,12 +16873,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -17862,12 +16897,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -17888,12 +16921,10 @@ func rewriteBlockARM(b *Block) 
bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -17914,12 +16945,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -17940,12 +16968,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -17966,12 +16991,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADD x y)) yes no) @@ -17995,11 +17017,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } break @@ -18022,14 +17042,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - 
b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -18049,11 +17066,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -18074,12 +17090,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -18100,12 +17114,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -18126,12 +17138,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -18152,12 +17162,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -18178,12 +17185,9 @@ 
func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -18204,12 +17208,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(AND x y)) yes no) @@ -18233,11 +17234,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } break @@ -18259,11 +17258,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -18284,12 +17282,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -18310,12 +17306,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) 
return true } // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -18336,12 +17330,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -18362,12 +17354,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -18388,12 +17377,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -18414,12 +17400,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XOR x y)) yes no) @@ -18443,11 +17426,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } break @@ -18469,11 +17450,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, 
OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -18494,12 +17474,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -18520,12 +17498,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -18546,12 +17522,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -18572,12 +17546,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -18598,12 +17569,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ 
(CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -18624,12 +17592,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) return true } case BlockARMGE: @@ -18670,8 +17635,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMLE) - b.AddControl(cmp) + b.resetWithControl(BlockARMLE, cmp) return true } // match: (GE (CMPconst [0] l:(SUB x y)) yes no) @@ -18691,11 +17655,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(MULS x y a)) yes no) @@ -18716,14 +17678,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -18743,11 +17702,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -18768,12 +17726,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - 
b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -18794,12 +17750,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -18820,12 +17774,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -18846,12 +17798,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -18872,12 +17821,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -18898,12 +17844,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADD x y)) yes no) @@ 
-18927,11 +17870,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } break @@ -18954,14 +17895,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -18981,11 +17919,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -19006,12 +17943,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -19032,12 +17967,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -19058,12 +17991,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - 
v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -19084,12 +18015,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -19110,12 +18038,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -19136,12 +18061,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(AND x y)) yes no) @@ -19165,11 +18087,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } break @@ -19191,11 +18111,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -19216,12 +18135,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { 
break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -19242,12 +18159,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -19268,12 +18183,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -19294,12 +18207,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -19320,12 +18230,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -19346,12 +18253,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - 
b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XOR x y)) yes no) @@ -19375,11 +18279,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } break @@ -19401,11 +18303,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -19426,12 +18327,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -19452,12 +18351,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -19478,12 +18375,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -19504,12 +18399,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := 
b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -19530,12 +18422,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -19556,12 +18445,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGE, v0) return true } case BlockARMGT: @@ -19603,8 +18489,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMLT) - b.AddControl(cmp) + b.resetWithControl(BlockARMLT, cmp) return true } // match: (GT (CMPconst [0] l:(SUB x y)) yes no) @@ -19624,11 +18509,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(MULS x y a)) yes no) @@ -19649,14 +18532,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] 
l:(SUBconst [c] x)) yes no) @@ -19676,11 +18556,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -19701,12 +18580,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -19727,12 +18604,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -19753,12 +18628,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -19779,12 +18652,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -19805,12 +18675,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -19831,12 +18698,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADD x y)) yes no) @@ -19860,11 +18724,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } break @@ -19886,11 +18748,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -19911,12 +18772,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -19937,12 +18796,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -19963,12 +18820,10 @@ func rewriteBlockARM(b *Block) 
bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -19989,12 +18844,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -20015,12 +18867,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -20041,12 +18890,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(AND x y)) yes no) @@ -20070,11 +18916,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } break @@ -20097,14 +18941,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - 
b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no) @@ -20124,11 +18965,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -20149,12 +18989,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -20175,12 +19013,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -20201,12 +19037,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -20227,12 +19061,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -20253,12 +19084,9 @@ 
func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -20279,12 +19107,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XOR x y)) yes no) @@ -20308,11 +19133,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } break @@ -20334,11 +19157,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -20359,12 +19181,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -20385,12 +19205,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) 
return true } // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -20411,12 +19229,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -20437,12 +19253,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -20463,12 +19276,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -20489,12 +19299,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGT, v0) return true } case BlockIf: @@ -20503,8 +19310,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMEQ) - b.AddControl(cc) + b.resetWithControl(BlockARMEQ, cc) return true } // match: (If (NotEqual cc) yes no) @@ -20512,8 +19318,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMNotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMNE) - b.AddControl(cc) + 
b.resetWithControl(BlockARMNE, cc) return true } // match: (If (LessThan cc) yes no) @@ -20521,8 +19326,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMLT) - b.AddControl(cc) + b.resetWithControl(BlockARMLT, cc) return true } // match: (If (LessThanU cc) yes no) @@ -20530,8 +19334,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMULT) - b.AddControl(cc) + b.resetWithControl(BlockARMULT, cc) return true } // match: (If (LessEqual cc) yes no) @@ -20539,8 +19342,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMLE) - b.AddControl(cc) + b.resetWithControl(BlockARMLE, cc) return true } // match: (If (LessEqualU cc) yes no) @@ -20548,8 +19350,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMULE) - b.AddControl(cc) + b.resetWithControl(BlockARMULE, cc) return true } // match: (If (GreaterThan cc) yes no) @@ -20557,8 +19358,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMGT) - b.AddControl(cc) + b.resetWithControl(BlockARMGT, cc) return true } // match: (If (GreaterThanU cc) yes no) @@ -20566,8 +19366,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMUGT) - b.AddControl(cc) + b.resetWithControl(BlockARMUGT, cc) return true } // match: (If (GreaterEqual cc) yes no) @@ -20575,8 +19374,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMGE) - b.AddControl(cc) + b.resetWithControl(BlockARMGE, cc) return true } // match: (If (GreaterEqualU 
cc) yes no) @@ -20584,19 +19382,17 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMUGE) - b.AddControl(cc) + b.resetWithControl(BlockARMUGE, cc) return true } // match: (If cond yes no) // result: (NE (CMPconst [0] cond) yes no) for { cond := b.Controls[0] - b.Reset(BlockARMNE) v0 := b.NewValue0(cond.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(cond) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } case BlockARMLE: @@ -20637,8 +19433,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMGE) - b.AddControl(cmp) + b.resetWithControl(BlockARMGE, cmp) return true } // match: (LE (CMPconst [0] l:(SUB x y)) yes no) @@ -20658,11 +19453,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(MULS x y a)) yes no) @@ -20683,14 +19476,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -20710,11 +19500,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -20735,12 +19524,10 @@ func rewriteBlockARM(b 
*Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -20761,12 +19548,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -20787,12 +19572,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -20813,12 +19596,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -20839,12 +19619,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -20865,12 +19642,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADD x y)) yes no) @@ -20894,11 +19668,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } break @@ -20921,14 +19693,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -20948,11 +19717,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -20973,12 +19741,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -20999,12 +19765,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -21025,12 +19789,10 @@ func 
rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -21051,12 +19813,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -21077,12 +19836,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -21103,12 +19859,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(AND x y)) yes no) @@ -21132,11 +19885,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } break @@ -21158,11 +19909,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, 
v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -21183,12 +19933,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -21209,12 +19957,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -21235,12 +19981,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -21261,12 +20005,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -21287,12 +20028,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -21313,12 +20051,9 @@ func rewriteBlockARM(b *Block) bool { if 
!(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XOR x y)) yes no) @@ -21342,11 +20077,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } break @@ -21368,11 +20101,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -21393,12 +20125,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -21419,12 +20149,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -21445,12 +20173,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] 
l:(XORshiftLLreg x y z)) yes no) @@ -21471,12 +20197,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -21497,12 +20220,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -21523,12 +20243,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLE, v0) return true } case BlockARMLT: @@ -21570,8 +20287,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMGT) - b.AddControl(cmp) + b.resetWithControl(BlockARMGT, cmp) return true } // match: (LT (CMPconst [0] l:(SUB x y)) yes no) @@ -21591,11 +20307,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(MULS x y a)) yes no) @@ -21616,14 +20330,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - 
v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -21643,11 +20354,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -21668,12 +20378,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -21694,12 +20402,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -21720,12 +20426,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -21746,12 +20450,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ 
-21772,12 +20473,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -21798,12 +20496,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADD x y)) yes no) @@ -21827,11 +20522,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } break @@ -21854,14 +20547,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -21881,11 +20571,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -21906,12 +20595,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - 
v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -21932,12 +20619,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -21958,12 +20643,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -21984,12 +20667,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -22010,12 +20690,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -22036,12 +20713,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT 
(CMPconst [0] l:(AND x y)) yes no) @@ -22065,11 +20739,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } break @@ -22091,11 +20763,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -22116,12 +20787,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -22142,12 +20811,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -22168,12 +20835,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -22194,12 +20859,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - 
b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -22220,12 +20882,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -22246,12 +20905,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XOR x y)) yes no) @@ -22275,11 +20931,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } break @@ -22301,11 +20955,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -22326,12 +20979,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -22352,12 +21003,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 
:= b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -22378,12 +21027,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -22404,12 +21051,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -22430,12 +21074,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -22456,12 +21097,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLT, v0) return true } case BlockARMNE: @@ -22477,8 +21115,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMEQ) - b.AddControl(cc) + b.resetWithControl(BlockARMEQ, cc) return true } // match: (NE (CMPconst [0] (NotEqual cc)) yes no) @@ -22493,8 +21130,7 @@ func rewriteBlockARM(b *Block) bool { 
break } cc := v_0_0.Args[0] - b.Reset(BlockARMNE) - b.AddControl(cc) + b.resetWithControl(BlockARMNE, cc) return true } // match: (NE (CMPconst [0] (LessThan cc)) yes no) @@ -22509,8 +21145,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMLT) - b.AddControl(cc) + b.resetWithControl(BlockARMLT, cc) return true } // match: (NE (CMPconst [0] (LessThanU cc)) yes no) @@ -22525,8 +21160,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMULT) - b.AddControl(cc) + b.resetWithControl(BlockARMULT, cc) return true } // match: (NE (CMPconst [0] (LessEqual cc)) yes no) @@ -22541,8 +21175,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMLE) - b.AddControl(cc) + b.resetWithControl(BlockARMLE, cc) return true } // match: (NE (CMPconst [0] (LessEqualU cc)) yes no) @@ -22557,8 +21190,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMULE) - b.AddControl(cc) + b.resetWithControl(BlockARMULE, cc) return true } // match: (NE (CMPconst [0] (GreaterThan cc)) yes no) @@ -22573,8 +21205,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMGT) - b.AddControl(cc) + b.resetWithControl(BlockARMGT, cc) return true } // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no) @@ -22589,8 +21220,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMUGT) - b.AddControl(cc) + b.resetWithControl(BlockARMUGT, cc) return true } // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no) @@ -22605,8 +21235,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMGE) - b.AddControl(cc) + b.resetWithControl(BlockARMGE, cc) return true } // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no) @@ -22621,8 +21250,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMUGE) - b.AddControl(cc) + b.resetWithControl(BlockARMUGE, cc) 
return true } // match: (NE (FlagEQ) yes no) @@ -22661,8 +21289,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMNE) - b.AddControl(cmp) + b.resetWithControl(BlockARMNE, cmp) return true } // match: (NE (CMPconst [0] l:(SUB x y)) yes no) @@ -22682,11 +21309,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(MULS x y a)) yes no) @@ -22707,14 +21332,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -22734,11 +21356,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -22759,12 +21380,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -22785,12 +21404,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - 
v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -22811,12 +21428,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -22837,12 +21452,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -22863,12 +21475,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -22889,12 +21498,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADD x y)) yes no) @@ -22918,11 +21524,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } break @@ -22945,14 +21549,11 @@ func 
rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -22972,11 +21573,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -22997,12 +21597,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -23023,12 +21621,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -23049,12 +21645,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -23075,12 +21669,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, 
OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -23101,12 +21692,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -23127,12 +21715,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(AND x y)) yes no) @@ -23156,11 +21741,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } break @@ -23182,11 +21765,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -23207,12 +21789,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -23233,12 +21813,10 @@ func 
rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -23259,12 +21837,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -23285,12 +21861,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -23311,12 +21884,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -23337,12 +21907,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XOR x y)) yes no) @@ -23366,11 +21933,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - 
v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } break @@ -23392,11 +21957,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -23417,12 +21981,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -23443,12 +22005,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -23469,12 +22029,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -23495,12 +22053,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -23521,12 +22076,9 @@ func rewriteBlockARM(b *Block) bool { if 
!(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -23547,12 +22099,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) - b.AddControl(v0) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) return true } case BlockARMUGE: @@ -23593,8 +22142,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMULE) - b.AddControl(cmp) + b.resetWithControl(BlockARMULE, cmp) return true } case BlockARMUGT: @@ -23636,8 +22184,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMULT) - b.AddControl(cmp) + b.resetWithControl(BlockARMULT, cmp) return true } case BlockARMULE: @@ -23678,8 +22225,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMUGE) - b.AddControl(cmp) + b.resetWithControl(BlockARMUGE, cmp) return true } case BlockARMULT: @@ -23721,8 +22267,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMUGT) - b.AddControl(cmp) + b.resetWithControl(BlockARMUGT, cmp) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 4bee98e4d3..4d1ed50d9b 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -604,6 +604,9 @@ func rewriteValueARM64(v *Value) bool { case OpCvt64to64F: v.Op = 
OpARM64SCVTFD return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueARM64_OpDiv16(v) case OpDiv16u: @@ -1091,9 +1094,7 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { } c := v_2_0_0.Args[0] v.reset(OpARM64ADCSflags) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.AddArg3(x, y, c) return true } // match: (ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) @@ -1113,8 +1114,7 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { break } v.reset(OpARM64ADDSflags) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1156,9 +1156,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MADD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1179,9 +1177,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MSUB) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1202,9 +1198,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MADDW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1225,9 +1219,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MSUBW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1242,8 +1234,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64SUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1265,8 +1256,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1288,8 +1278,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } v.reset(OpARM64ADDshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1311,8 +1300,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } 
v.reset(OpARM64ADDshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1380,10 +1368,9 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -1451,8 +1438,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1521,10 +1507,9 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -1596,8 +1581,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1629,9 +1613,7 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVDconst [d])) @@ -1772,8 +1754,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: (ADDshiftLL [c] (UBFX [bfc] x) x2) @@ -1793,8 +1774,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { } v.reset(OpARM64EXTRWconst) v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } return false @@ -1933,9 +1913,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND x (MVN y)) @@ -1948,8 +1926,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64BIC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1971,8 +1948,7 @@ func rewriteValueARM64_OpARM64AND(v 
*Value) bool { } v.reset(OpARM64ANDshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1994,8 +1970,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } v.reset(OpARM64ANDshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2017,8 +1992,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } v.reset(OpARM64ANDshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2044,9 +2018,7 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVDconst [d])) @@ -2201,9 +2173,7 @@ func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2257,9 +2227,7 @@ func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2313,9 +2281,7 @@ func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2363,8 +2329,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { } v.reset(OpARM64BICshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(SRLconst [c] y)) @@ -2383,8 +2348,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { } v.reset(OpARM64BICshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(SRAconst [c] y)) @@ -2403,8 +2367,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { } v.reset(OpARM64BICshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -2553,8 +2516,7 @@ func 
rewriteValueARM64_OpARM64CMN(v *Value) bool { } v.reset(OpARM64CMNshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2576,8 +2538,7 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool { } v.reset(OpARM64CMNshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2599,8 +2560,7 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool { } v.reset(OpARM64CMNshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2940,8 +2900,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -2961,8 +2920,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64CMPshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (CMP x0:(SLLconst [c] y) x1) @@ -2982,8 +2940,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) + v0.AddArg2(x1, y) v.AddArg(v0) return true } @@ -3003,8 +2960,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64CMPshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (CMP x0:(SRLconst [c] y) x1) @@ -3024,8 +2980,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) + v0.AddArg2(x1, y) v.AddArg(v0) return true } @@ -3045,8 +3000,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64CMPshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (CMP x0:(SRAconst [c] y) x1) @@ -3066,8 +3020,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { 
v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) + v0.AddArg2(x1, y) v.AddArg(v0) return true } @@ -3116,8 +3069,7 @@ func rewriteValueARM64_OpARM64CMPW(v *Value) bool { } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -3498,8 +3450,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { flag := v_2 v.reset(OpARM64CSEL0) v.Aux = cc - v.AddArg(x) - v.AddArg(flag) + v.AddArg2(x, flag) return true } // match: (CSEL {cc} (MOVDconst [0]) y flag) @@ -3513,8 +3464,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { flag := v_2 v.reset(OpARM64CSEL0) v.Aux = arm64Negate(cc.(Op)) - v.AddArg(y) - v.AddArg(flag) + v.AddArg2(y, flag) return true } // match: (CSEL {cc} x y (InvertFlags cmp)) @@ -3529,9 +3479,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { cmp := v_2.Args[0] v.reset(OpARM64CSEL) v.Aux = arm64Invert(cc.(Op)) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) + v.AddArg3(x, y, cmp) return true } // match: (CSEL {cc} x _ flag) @@ -3544,9 +3492,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { if !(ccARM64Eval(cc, flag) > 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CSEL {cc} _ y flag) @@ -3559,9 +3505,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { if !(ccARM64Eval(cc, flag) < 0) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CSEL {cc} x y (CMPWconst [0] boolval)) @@ -3580,9 +3524,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = boolval.Op - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(boolval)) + v.AddArg3(x, y, flagArg(boolval)) return true } // match: (CSEL {cc} x y (CMPWconst [0] boolval)) @@ -3601,9 +3543,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = 
arm64Negate(boolval.Op) - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(boolval)) + v.AddArg3(x, y, flagArg(boolval)) return true } return false @@ -3622,8 +3562,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { cmp := v_1.Args[0] v.reset(OpARM64CSEL0) v.Aux = arm64Invert(cc.(Op)) - v.AddArg(x) - v.AddArg(cmp) + v.AddArg2(x, cmp) return true } // match: (CSEL0 {cc} x flag) @@ -3636,9 +3575,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { if !(ccARM64Eval(cc, flag) > 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CSEL0 {cc} _ flag) @@ -3669,8 +3606,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { } v.reset(OpARM64CSEL0) v.Aux = boolval.Op - v.AddArg(x) - v.AddArg(flagArg(boolval)) + v.AddArg2(x, flagArg(boolval)) return true } // match: (CSEL0 {cc} x (CMPWconst [0] boolval)) @@ -3688,8 +3624,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { } v.reset(OpARM64CSEL0) v.Aux = arm64Negate(boolval.Op) - v.AddArg(x) - v.AddArg(flagArg(boolval)) + v.AddArg2(x, flagArg(boolval)) return true } return false @@ -3777,8 +3712,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { } v.reset(OpARM64EONshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (EON x0 x1:(SRLconst [c] y)) @@ -3797,8 +3731,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { } v.reset(OpARM64EONshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (EON x0 x1:(SRAconst [c] y)) @@ -3817,8 +3750,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { } v.reset(OpARM64EONshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -4010,9 +3942,7 @@ func rewriteValueARM64_OpARM64FADDD(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4028,9 +3958,7 @@ func rewriteValueARM64_OpARM64FADDD(v 
*Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4051,9 +3979,7 @@ func rewriteValueARM64_OpARM64FADDS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4069,9 +3995,7 @@ func rewriteValueARM64_OpARM64FADDS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4152,8 +4076,7 @@ func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool { sym := v_0.Aux b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -4174,8 +4097,7 @@ func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool { sym := v_0.Aux b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -4196,11 +4118,10 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { if v_1.Op != OpARM64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVDgpfp) v.AddArg(val) return true @@ -4223,8 +4144,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { v.reset(OpARM64FMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) @@ -4243,9 +4163,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { break } v.reset(OpARM64FMOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -4267,8 +4185,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { v.reset(OpARM64FMOVDload) 
v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4288,8 +4205,7 @@ func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDloadidx (MOVDconst [c]) ptr mem) @@ -4303,8 +4219,7 @@ func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4329,9 +4244,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -4353,9 +4266,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.reset(OpARM64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -4375,10 +4286,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { break } v.reset(OpARM64FMOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -4401,9 +4309,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.reset(OpARM64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4425,9 +4331,7 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstoreidx (MOVDconst [c]) 
idx val mem) @@ -4442,9 +4346,7 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVDstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } return false @@ -4463,11 +4365,10 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { if v_1.Op != OpARM64MOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVSgpfp) v.AddArg(val) return true @@ -4490,8 +4391,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { v.reset(OpARM64FMOVSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off] {sym} (ADD ptr idx) mem) @@ -4510,9 +4410,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { break } v.reset(OpARM64FMOVSloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -4534,8 +4432,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { v.reset(OpARM64FMOVSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4555,8 +4452,7 @@ func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVSload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSloadidx (MOVDconst [c]) ptr mem) @@ -4570,8 +4466,7 @@ func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVSload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4596,9 +4491,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) 
return true } // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -4620,9 +4513,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.reset(OpARM64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) @@ -4642,10 +4533,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { break } v.reset(OpARM64FMOVSstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -4668,9 +4556,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.reset(OpARM64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4692,9 +4578,7 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVSstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem) @@ -4709,9 +4593,7 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVSstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } return false @@ -4729,8 +4611,7 @@ func rewriteValueARM64_OpARM64FMULD(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4750,8 +4631,7 @@ func rewriteValueARM64_OpARM64FMULS(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4769,8 +4649,7 @@ func rewriteValueARM64_OpARM64FNEGD(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, 
y) return true } // match: (FNEGD (FNMULD x y)) @@ -4782,8 +4661,7 @@ func rewriteValueARM64_OpARM64FNEGD(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4799,8 +4677,7 @@ func rewriteValueARM64_OpARM64FNEGS(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (FNEGS (FNMULS x y)) @@ -4812,8 +4689,7 @@ func rewriteValueARM64_OpARM64FNEGS(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4831,8 +4707,7 @@ func rewriteValueARM64_OpARM64FNMULD(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4852,8 +4727,7 @@ func rewriteValueARM64_OpARM64FNMULS(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4873,9 +4747,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBD (FMULD x y) a) @@ -4888,9 +4760,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBD a (FNMULD x y)) @@ -4903,9 +4773,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBD (FNMULD x y) a) @@ -4918,9 +4786,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ 
-4938,9 +4804,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBS (FMULS x y) a) @@ -4953,9 +4817,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBS a (FNMULS x y)) @@ -4968,9 +4830,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBS (FNMULS x y) a) @@ -4983,9 +4843,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -5584,8 +5442,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a _ (MOVDconst [0])) @@ -5595,9 +5452,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADD a x (MOVDconst [1])) @@ -5609,8 +5464,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a x (MOVDconst [c])) @@ -5628,8 +5482,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a x (MOVDconst [c])) @@ -5646,12 +5499,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - 
v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5668,12 +5519,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5691,12 +5540,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5714,12 +5561,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5737,12 +5582,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5760,12 +5603,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [-1]) x) @@ -5777,8 +5618,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } x := v_2 v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // 
match: (MADD a (MOVDconst [0]) _) @@ -5788,9 +5628,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADD a (MOVDconst [1]) x) @@ -5802,8 +5640,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } x := v_2 v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5821,8 +5658,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5839,12 +5675,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5861,12 +5695,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5884,12 +5716,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5907,12 +5737,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return 
true } // match: (MADD a (MOVDconst [c]) x) @@ -5930,12 +5758,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5953,12 +5779,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD (MOVDconst [c]) x y) @@ -5973,8 +5797,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6016,8 +5839,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a _ (MOVDconst [c])) @@ -6032,9 +5854,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6051,8 +5871,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6070,8 +5889,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6088,12 +5906,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) 
- v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6110,12 +5926,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6133,12 +5947,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6156,12 +5968,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6179,12 +5989,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6202,12 +6010,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6224,8 +6030,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return 
true } // match: (MADDW a (MOVDconst [c]) _) @@ -6240,9 +6045,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6259,8 +6062,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6278,8 +6080,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6296,12 +6097,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6318,12 +6117,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6341,12 +6138,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6364,12 +6159,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } 
// match: (MADDW a (MOVDconst [c]) x) @@ -6387,12 +6180,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6410,12 +6201,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW (MOVDconst [c]) x y) @@ -6430,8 +6219,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6466,9 +6254,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -6538,8 +6324,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6563,8 +6348,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v0.AuxInt = log2(c + 1) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -6588,8 +6372,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6613,8 +6396,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v0.AuxInt = log2(c / 5) 
v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -6639,8 +6421,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v.AuxInt = log2(c / 7) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6664,8 +6445,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -6709,9 +6489,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if !(int32(c) == -1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -6791,8 +6569,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6816,8 +6593,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v0.AuxInt = log2(c + 1) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -6841,8 +6617,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6866,8 +6641,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -6892,8 +6666,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v.AuxInt = log2(c / 7) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - 
v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6917,8 +6690,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -7008,8 +6780,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { v.reset(OpARM64MOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} (ADD ptr idx) mem) @@ -7028,9 +6799,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { break } v.reset(OpARM64MOVBUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -7052,8 +6821,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { v.reset(OpARM64MOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) @@ -7068,7 +6836,6 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -7107,8 +6874,7 @@ func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) @@ -7122,8 +6888,7 @@ func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) @@ -7135,9 +6900,8 @@ func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { if v_2.Op != OpARM64MOVBstorezeroidx { 
break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -7156,7 +6920,6 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -7168,7 +6931,6 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -7279,8 +7041,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { v.reset(OpARM64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} (ADD ptr idx) mem) @@ -7299,9 +7060,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { break } v.reset(OpARM64MOVBloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -7323,8 +7082,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { v.reset(OpARM64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) @@ -7339,7 +7097,6 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -7365,8 +7122,7 @@ func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx (MOVDconst [c]) ptr mem) @@ -7380,8 +7136,7 @@ func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBload) v.AuxInt = c - v.AddArg(ptr) - 
v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) @@ -7393,9 +7148,8 @@ func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { if v_2.Op != OpARM64MOVBstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -7414,7 +7168,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { if x.Op != OpARM64MOVBload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -7426,7 +7179,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { if x.Op != OpARM64MOVBloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -7497,9 +7249,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) @@ -7519,10 +7269,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { break } v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -7545,9 +7292,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -7563,8 +7308,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -7581,9 +7325,7 @@ func 
rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -7600,9 +7342,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -7619,9 +7359,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -7638,9 +7376,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -7657,9 +7393,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) @@ -7676,9 +7410,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) @@ -7704,9 +7436,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7741,10 +7471,7 @@ func 
rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7772,9 +7499,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7809,10 +7534,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7840,9 +7562,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7877,10 +7597,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7912,9 +7629,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7953,10 +7668,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7986,9 +7698,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { 
v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem)) @@ -8025,10 +7735,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -8062,9 +7769,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem)) @@ -8105,10 +7810,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -8146,9 +7848,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) @@ -8193,10 +7893,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -8296,11 +7993,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = i - 7 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p 
(SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem)))))))) @@ -8404,12 +8099,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8461,11 +8153,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem)))) @@ -8521,12 +8211,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8590,11 +8277,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem)))) @@ -8662,12 +8347,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } 
v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8719,11 +8401,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem)))) @@ -8779,12 +8459,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8812,11 +8489,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem)) @@ -8849,12 +8524,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8882,11 +8554,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 
idx0 (UBFX [armBFAuxInt(8, 8)] w) mem)) @@ -8919,12 +8589,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8956,11 +8623,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) @@ -8997,12 +8662,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -9030,11 +8692,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem)) @@ -9067,12 +8727,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -9097,9 +8754,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) @@ -9114,9 
+8769,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVBstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) @@ -9129,9 +8782,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVBstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) @@ -9145,10 +8796,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) @@ -9162,10 +8810,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) @@ -9179,10 +8824,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) @@ -9196,10 +8838,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) @@ -9213,10 +8852,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVWUreg 
x) mem) @@ -9230,10 +8866,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem)) @@ -9258,10 +8891,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem)))) @@ -9319,12 +8949,9 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem)))) @@ -9383,10 +9010,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem)) @@ -9412,12 +9036,9 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // 
match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem)) @@ -9444,10 +9065,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } return false @@ -9475,8 +9093,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { v.reset(OpARM64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -9498,8 +9115,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { v.reset(OpARM64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem) @@ -9518,9 +9134,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { break } v.reset(OpARM64MOVBstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem)) @@ -9546,8 +9160,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem)) @@ -9578,9 +9191,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { continue } v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) + v.AddArg3(ptr1, idx1, mem) return true } break @@ -9602,8 +9213,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) @@ 
-9617,8 +9227,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem)) @@ -9639,9 +9248,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { break } v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -9660,11 +9267,10 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { if v_1.Op != OpARM64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVDfpgp) v.AddArg(val) return true @@ -9687,8 +9293,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { v.reset(OpARM64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} (ADD ptr idx) mem) @@ -9707,9 +9312,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { break } v.reset(OpARM64MOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) @@ -9728,9 +9331,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { break } v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -9752,8 +9353,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { v.reset(OpARM64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) @@ -9768,7 +9368,6 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { } off2 
:= v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -9807,8 +9406,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx (MOVDconst [c]) ptr mem) @@ -9822,8 +9420,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) @@ -9836,9 +9433,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDloadidx (SLLconst [3] idx) ptr mem) @@ -9851,9 +9446,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) @@ -9865,9 +9458,8 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { if v_2.Op != OpARM64MOVDstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -9892,8 +9484,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) @@ -9905,9 +9496,8 @@ func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { if v_2.Op != OpARM64MOVDstorezeroidx8 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if 
!(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -9964,9 +9554,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64FMOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -9988,9 +9576,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -10010,10 +9596,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) @@ -10033,10 +9616,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -10059,9 +9639,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -10077,8 +9655,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10100,9 +9677,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx 
(MOVDconst [c]) idx val mem) @@ -10117,9 +9692,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) @@ -10133,10 +9706,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem) @@ -10150,10 +9720,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) @@ -10166,9 +9733,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10190,9 +9755,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool { mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) @@ -10205,9 +9768,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10235,8 +9796,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -10258,8 +9818,7 @@ func 
rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem) @@ -10278,9 +9837,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) @@ -10299,9 +9856,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem)) @@ -10327,8 +9882,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem)) @@ -10362,8 +9916,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = 0 v.Aux = s - v.AddArg(p0) - v.AddArg(mem) + v.AddArg2(p0, mem) return true } break @@ -10395,8 +9948,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = 0 v.Aux = s - v.AddArg(p0) - v.AddArg(mem) + v.AddArg2(p0, mem) return true } return false @@ -10416,8 +9968,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem) @@ -10431,8 +9982,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + 
v.AddArg2(idx, mem) return true } // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) @@ -10445,9 +9995,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) @@ -10460,9 +10008,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10482,8 +10028,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10511,8 +10056,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { v.reset(OpARM64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} (ADD ptr idx) mem) @@ -10531,9 +10075,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { break } v.reset(OpARM64MOVHUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) @@ -10552,9 +10094,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { break } v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -10576,8 +10116,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { v.reset(OpARM64MOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) @@ -10592,7 +10131,6 @@ 
func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -10631,8 +10169,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) @@ -10646,8 +10183,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) @@ -10660,9 +10196,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx ptr (ADD idx idx) mem) @@ -10678,9 +10212,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { } mem := v_2 v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx (ADD idx idx) ptr mem) @@ -10696,9 +10228,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) @@ -10710,9 +10240,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -10737,8 +10266,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c << 1 - 
v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) @@ -10750,9 +10278,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx2 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -10771,7 +10298,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10783,7 +10309,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVHUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10795,7 +10320,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10807,7 +10331,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10819,7 +10342,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10929,8 +10451,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { v.reset(OpARM64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} (ADD ptr idx) mem) @@ -10949,9 +10470,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { break } v.reset(OpARM64MOVHloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) @@ -10970,9 +10489,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { break } v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -10994,8 +10511,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { v.reset(OpARM64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) @@ -11010,7 +10526,6 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -11036,8 +10551,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVDconst [c]) ptr mem) @@ -11051,8 +10565,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) @@ -11065,9 +10578,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx ptr (ADD idx idx) mem) @@ -11083,9 +10594,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { } mem := v_2 v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx (ADD idx idx) ptr mem) @@ -11101,9 +10610,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) @@ -11115,9 
+10622,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -11142,8 +10648,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) @@ -11155,9 +10660,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx2 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -11176,7 +10680,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11188,7 +10691,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11200,7 +10702,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVHload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11212,7 +10713,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11224,7 +10724,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11236,7 +10735,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11248,7 +10746,6 @@ func 
rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -11341,9 +10838,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) @@ -11363,10 +10858,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) @@ -11386,10 +10878,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -11412,9 +10901,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -11430,8 +10917,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -11448,9 +10934,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -11467,9 +10951,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = 
off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -11486,9 +10968,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) @@ -11505,9 +10985,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) @@ -11533,9 +11011,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem)) @@ -11570,10 +11046,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -11606,13 +11079,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) @@ -11638,9 +11108,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) 
x:(MOVHstoreidx ptr1 idx1 w mem)) @@ -11675,10 +11143,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -11711,13 +11176,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem)) @@ -11747,9 +11209,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem)) @@ -11788,10 +11248,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -11828,13 +11285,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem)) @@ -11862,9 +11316,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) 
mem)) @@ -11901,10 +11353,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -11939,13 +11388,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w0, mem) return true } return false @@ -11967,9 +11413,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx (MOVDconst [c]) idx val mem) @@ -11984,9 +11428,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem) @@ -12000,10 +11442,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx ptr (ADD idx idx) val mem) @@ -12020,10 +11459,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem) @@ -12037,10 +11473,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx 
(ADD idx idx) ptr val mem) @@ -12057,10 +11490,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem) @@ -12073,9 +11503,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) @@ -12089,10 +11517,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem) @@ -12106,10 +11531,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) @@ -12123,10 +11545,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem) @@ -12140,10 +11559,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem)) @@ -12168,10 +11584,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } return false @@ -12193,9 +11606,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) @@ -12208,9 +11619,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem) @@ -12224,10 +11633,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) @@ -12241,10 +11647,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem) @@ -12258,10 +11661,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) @@ -12275,10 +11675,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -12306,8 +11703,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -12329,8 +11725,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem) @@ -12349,9 +11744,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { break } v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) @@ -12370,9 +11763,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { break } v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem)) @@ -12398,8 +11789,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem)) @@ -12430,9 +11820,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { continue } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) + v.AddArg3(ptr1, idx1, mem) return true } break @@ -12461,12 +11849,10 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr1, v0, mem) return true } return false @@ -12486,8 +11872,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem) @@ -12501,8 +11886,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem) @@ -12515,9 +11899,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx ptr (ADD idx idx) mem) @@ -12533,9 +11915,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { } mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem) @@ -12548,9 +11928,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx (ADD idx idx) ptr mem) @@ -12566,9 +11944,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem)) @@ -12589,9 +11965,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -12611,8 +11985,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -12640,8 
+12013,7 @@ func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -12663,8 +12035,7 @@ func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -12683,11 +12054,10 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { if v_1.Op != OpARM64FMOVSstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVSfpgp) v.AddArg(val) return true @@ -12710,8 +12080,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { v.reset(OpARM64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off] {sym} (ADD ptr idx) mem) @@ -12730,9 +12099,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { break } v.reset(OpARM64MOVWUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) @@ -12751,9 +12118,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { break } v.reset(OpARM64MOVWUloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -12775,8 +12140,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { v.reset(OpARM64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) @@ -12791,7 +12155,6 @@ func 
rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -12830,8 +12193,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUloadidx (MOVDconst [c]) ptr mem) @@ -12845,8 +12207,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem) @@ -12859,9 +12220,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWUloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem) @@ -12874,9 +12233,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVWUloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) @@ -12888,9 +12245,8 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -12915,8 +12271,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) @@ -12928,9 +12283,8 @@ func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx4 { break } 
- _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -12949,7 +12303,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12961,7 +12314,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVHUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12973,7 +12325,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVWUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12985,7 +12336,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12997,7 +12347,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13009,7 +12358,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVWUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13021,7 +12369,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13033,7 +12380,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVWUloadidx4 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13154,8 +12500,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { v.reset(OpARM64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} (ADD ptr idx) mem) @@ -13174,9 +12519,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { break } v.reset(OpARM64MOVWloadidx) - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) @@ -13195,9 +12538,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { break } v.reset(OpARM64MOVWloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -13219,8 +12560,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { v.reset(OpARM64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) @@ -13235,7 +12575,6 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -13261,8 +12600,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx (MOVDconst [c]) ptr mem) @@ -13276,8 +12614,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx ptr (SLLconst [2] idx) mem) @@ -13290,9 +12627,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SLLconst [2] idx) ptr mem) @@ -13305,9 +12640,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVWloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 
idx2 _)) @@ -13319,9 +12652,8 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -13346,8 +12678,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) @@ -13359,9 +12690,8 @@ func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx4 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -13380,7 +12710,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13392,7 +12721,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13404,7 +12732,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13416,7 +12743,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13428,7 +12754,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVWload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13440,7 +12765,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13452,7 +12776,6 @@ func 
rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13464,7 +12787,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13476,7 +12798,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13488,7 +12809,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVWloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13500,7 +12820,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13512,7 +12831,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13524,7 +12842,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVWloadidx4 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -13623,9 +12940,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64FMOVSstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -13647,9 +12962,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) @@ -13669,10 +12982,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) @@ -13692,10 +13002,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -13718,9 +13025,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -13736,8 +13041,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -13754,9 +13058,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) @@ -13773,9 +13075,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem)) @@ -13801,9 +13101,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem)) @@ -13838,10 +13136,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { 
continue } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -13874,13 +13169,10 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 2 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem)) @@ -13908,9 +13200,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem)) @@ -13947,10 +13237,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { continue } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -13985,13 +13272,10 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 2 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w0, mem) return true } return false @@ -14013,9 +13297,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx (MOVDconst [c]) idx val mem) @@ -14030,9 +13312,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVWstoreidx 
ptr (SLLconst [2] idx) val mem) @@ -14046,10 +13326,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem) @@ -14063,10 +13340,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem) @@ -14079,9 +13353,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreidx ptr idx (MOVWreg x) mem) @@ -14095,10 +13367,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem) @@ -14112,10 +13381,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem)) @@ -14140,10 +13406,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } return false @@ -14165,9 +13428,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, 
mem) return true } // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) @@ -14180,9 +13441,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem) @@ -14196,10 +13455,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) @@ -14213,10 +13469,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -14244,8 +13497,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -14267,8 +13519,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem) @@ -14287,9 +13538,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) @@ -14308,9 +13557,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: 
(MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem)) @@ -14336,8 +13583,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem)) @@ -14368,9 +13614,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { continue } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) + v.AddArg3(ptr1, idx1, mem) return true } break @@ -14399,12 +13643,10 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 2 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr1, v0, mem) return true } return false @@ -14424,8 +13666,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem) @@ -14439,8 +13680,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem) @@ -14453,9 +13693,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem) @@ -14468,9 +13706,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezeroidx ptr 
(ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem)) @@ -14491,9 +13727,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -14513,8 +13747,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -14533,8 +13766,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a _ (MOVDconst [0])) @@ -14544,9 +13776,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUB a x (MOVDconst [1])) @@ -14558,8 +13788,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14577,8 +13806,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14595,12 +13823,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14617,12 +13843,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) 
return true } // match: (MSUB a x (MOVDconst [c])) @@ -14640,12 +13864,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14663,12 +13885,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14686,12 +13906,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14709,12 +13927,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [-1]) x) @@ -14726,8 +13942,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } x := v_2 v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a (MOVDconst [0]) _) @@ -14737,9 +13952,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUB a (MOVDconst [1]) x) @@ -14751,8 +13964,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } x := v_2 v.reset(OpARM64SUB) - 
v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14770,8 +13982,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14788,12 +13999,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14810,12 +14019,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14833,12 +14040,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14856,12 +14061,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14879,12 +14082,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst 
[c]) x) @@ -14902,12 +14103,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB (MOVDconst [c]) x y) @@ -14922,8 +14121,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14965,8 +14163,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a _ (MOVDconst [c])) @@ -14981,9 +14178,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15000,8 +14195,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15019,8 +14213,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15037,12 +14230,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15059,12 +14250,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) 
- v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15082,12 +14271,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15105,12 +14292,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15128,12 +14313,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15151,12 +14334,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15173,8 +14354,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a (MOVDconst [c]) _) @@ -15189,9 +14369,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15208,8 +14386,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } 
v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15227,8 +14404,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15245,12 +14421,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15267,12 +14441,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15290,12 +14462,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15313,12 +14483,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15336,12 +14504,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return 
true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15359,12 +14525,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW (MOVDconst [c]) x y) @@ -15379,8 +14543,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15417,8 +14580,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -15458,9 +14620,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -15500,8 +14660,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -15523,8 +14682,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c + 1) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -15546,8 +14704,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15570,8 +14727,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15596,8 +14752,7 @@ func rewriteValueARM64_OpARM64MUL(v 
*Value) bool { v0.AuxInt = 3 v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15620,8 +14775,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15661,8 +14815,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -15717,9 +14870,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if !(int32(c) == 1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -15759,8 +14910,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -15782,8 +14932,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c + 1) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -15805,8 +14954,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15829,8 +14977,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15855,8 +15002,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v0.AuxInt = 3 v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15879,8 +15025,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt 
= log2(c / 9) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -16034,8 +15179,7 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (NEG (MULW x y)) @@ -16047,8 +15191,7 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (NEG (MOVDconst [c])) @@ -16259,9 +15402,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (OR x (MVN y)) @@ -16274,8 +15415,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64ORN) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -16297,8 +15437,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64ORshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -16320,8 +15459,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64ORshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -16343,8 +15481,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64ORshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -16412,10 +15549,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -16483,8 +15619,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -16553,10 +15688,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { 
continue } v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -16628,8 +15762,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -16654,8 +15787,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64BFI) v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } break @@ -16680,8 +15812,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64BFXIL) v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } break @@ -16768,14 +15899,12 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -16861,11 +15990,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr0, idx0, mem) return true } } @@ -16952,11 +16078,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) return true } break @@ -17127,14 +16250,12 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) 
+ v0.AddArg2(v1, mem) return true } break @@ -17288,11 +16409,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr0, idx0, mem) return true } } @@ -17463,11 +16581,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) return true } break @@ -17554,15 +16669,13 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) v1.Aux = s v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -17649,12 +16762,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -17742,12 +16852,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -17919,15 +17026,13 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - 
v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -18082,12 +17187,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -18259,12 +17361,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -18315,8 +17414,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { } v.reset(OpARM64ORNshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (ORN x0 x1:(SRLconst [c] y)) @@ -18335,8 +17433,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { } v.reset(OpARM64ORNshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (ORN x0 x1:(SRAconst [c] y)) @@ -18355,8 +17452,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { } v.reset(OpARM64ORNshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -18478,9 +17574,7 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -18589,9 +17683,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - 
v.AddArg(y) + v.copyOf(y) return true } // match: ( ORshiftLL [c] (SRLconst x [64-c]) x) @@ -18654,8 +17746,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: ( ORshiftLL [c] (UBFX [bfc] x) x2) @@ -18675,8 +17766,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } v.reset(OpARM64EXTRWconst) v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) @@ -18698,8 +17788,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } v.reset(OpARM64BFXIL) v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) @@ -18740,14 +17829,12 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) @@ -18794,11 +17881,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr0, idx0, mem) return true } break @@ -18840,11 +17924,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) return true } // match: (ORshiftLL [24] 
o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem))) @@ -18902,14 +17983,12 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) @@ -18970,11 +18049,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr0, idx0, mem) return true } break @@ -19033,11 +18109,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) return true } // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx2 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) @@ -19094,14 +18167,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type) v1.AuxInt = 1 v1.AddArg(idx0) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg3(ptr0, v1, mem) return true } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) 
y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem))) @@ -19201,14 +18271,12 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) @@ -19303,11 +18371,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr0, idx0, mem) return true } break @@ -19400,14 +18465,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) + v.copyOf(v0) v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type) v1.AuxInt = 2 v1.AddArg(idx0) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg3(ptr0, v1, mem) return true } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) @@ -19506,11 +18568,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { 
} b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) return true } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) @@ -19551,13 +18610,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -19605,12 +18662,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -19653,12 +18707,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -19721,15 +18772,13 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) v1.Aux = s v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -19795,12 +18844,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x1.Pos, OpARM64REVW, t) - 
v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -19864,12 +18910,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -19974,15 +19017,13 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -20082,12 +19123,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x3.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -20193,12 +19231,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -20253,9 +19288,7 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -20309,9 +19342,7 @@ func 
rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: ( ORshiftRL [c] (SLLconst x [64-c]) x) @@ -20368,8 +19399,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { } v.reset(OpARM64BFI) v.AuxInt = armBFAuxInt(lc-rc, 64-lc) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) @@ -20392,8 +19422,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { } v.reset(OpARM64BFXIL) v.AuxInt = armBFAuxInt(rc-lc, 64-rc) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -20462,9 +19491,7 @@ func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool { } bo := v_2_0_0_0.Args[0] v.reset(OpARM64SBCSflags) - v.AddArg(x) - v.AddArg(y) - v.AddArg(bo) + v.AddArg3(x, y, bo) return true } // match: (SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) @@ -20484,8 +19511,7 @@ func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool { break } v.reset(OpARM64SUBSflags) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -21040,10 +20066,7 @@ func rewriteValueARM64_OpARM64STP(v *Value) bool { v.reset(OpARM64STP) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val1) - v.AddArg(val2) - v.AddArg(mem) + v.AddArg4(ptr, val1, val2, mem) return true } // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) @@ -21067,10 +20090,7 @@ func rewriteValueARM64_OpARM64STP(v *Value) bool { v.reset(OpARM64STP) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val1) - v.AddArg(val2) - v.AddArg(mem) + v.AddArg4(ptr, val1, val2, mem) return true } // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) @@ -21086,8 +20106,7 @@ func rewriteValueARM64_OpARM64STP(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) 
return true } return false @@ -21124,9 +20143,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MSUB) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB a l:(MNEG x y)) @@ -21144,9 +20161,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MADD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB a l:(MULW x y)) @@ -21164,9 +20179,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MSUBW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB a l:(MNEGW x y)) @@ -21184,9 +20197,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MADDW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB x x) @@ -21211,10 +20222,8 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(y) + v0.AddArg2(x, z) + v.AddArg2(v0, y) return true } // match: (SUB (SUB x y) z) @@ -21227,11 +20236,9 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpARM64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } // match: (SUB x0 x1:(SLLconst [c] y)) @@ -21250,8 +20257,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (SUB x0 x1:(SRLconst [c] y)) @@ -21270,8 +20276,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { } v.reset(OpARM64SUBshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (SUB x0 x1:(SRAconst [c] y)) @@ -21290,8 +20295,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { } 
v.reset(OpARM64SUBshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -21305,9 +20309,7 @@ func rewriteValueARM64_OpARM64SUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBconst [c] (MOVDconst [d])) @@ -21496,8 +20498,7 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { } v.reset(OpARM64TSTshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -21519,8 +20520,7 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { } v.reset(OpARM64TSTshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -21542,8 +20542,7 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { } v.reset(OpARM64TSTshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -21888,9 +20887,7 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (UDIV x (MOVDconst [c])) @@ -21942,9 +20939,7 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool { if !(uint32(c) == 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (UDIVW x (MOVDconst [c])) @@ -21996,12 +20991,9 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool { y := v_1 v.reset(OpARM64MSUB) v.Type = typ.UInt64 - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg3(x, y, v0) return true } // match: (UMOD _ (MOVDconst [1])) @@ -22063,12 +21055,9 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool { y := v_1 v.reset(OpARM64MSUBW) v.Type = typ.UInt32 - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARM64UDIVW, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg3(x, y, v0) return 
true } // match: (UMODW _ (MOVDconst [c])) @@ -22162,8 +21151,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64EON) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22185,8 +21173,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } v.reset(OpARM64XORshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -22208,8 +21195,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } v.reset(OpARM64XORshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -22231,8 +21217,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } v.reset(OpARM64XORshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -22300,10 +21285,9 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -22371,8 +21355,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22441,10 +21424,9 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -22516,8 +21498,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22533,9 +21514,7 @@ func rewriteValueARM64_OpARM64XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [-1] x) @@ -22690,8 +21669,7 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: 
(XORshiftLL [c] (UBFX [bfc] x) x2) @@ -22711,8 +21689,7 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { } v.reset(OpARM64EXTRWconst) v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } return false @@ -22873,9 +21850,7 @@ func rewriteValueARM64_OpAtomicAnd8(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -22894,9 +21869,7 @@ func rewriteValueARM64_OpAtomicOr8(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -22915,11 +21888,9 @@ func rewriteValueARM64_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpARM64SUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -22934,10 +21905,9 @@ func rewriteValueARM64_OpBitLen32(v *Value) bool { v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -22952,10 +21922,9 @@ func rewriteValueARM64_OpBitLen64(v *Value) bool { v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23008,9 +21977,7 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = boolval.Op - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(boolval)) + v.AddArg3(x, y, flagArg(boolval)) return true } // match: (CondSelect x y boolval) @@ 
-23025,12 +21992,10 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = OpARM64NotEqual - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(boolval) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } return false @@ -23127,10 +22092,9 @@ func rewriteValueARM64_OpDiv16(v *Value) bool { v.reset(OpARM64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23147,10 +22111,9 @@ func rewriteValueARM64_OpDiv16u(v *Value) bool { v.reset(OpARM64UDIVW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23167,10 +22130,9 @@ func rewriteValueARM64_OpDiv8(v *Value) bool { v.reset(OpARM64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23187,10 +22149,9 @@ func rewriteValueARM64_OpDiv8u(v *Value) bool { v.reset(OpARM64UDIVW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23208,10 +22169,9 @@ func rewriteValueARM64_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23227,8 +22187,7 @@ func rewriteValueARM64_OpEq32(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23244,8 +22203,7 @@ func rewriteValueARM64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23261,8 +22219,7 @@ func rewriteValueARM64_OpEq64(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23278,8 +22235,7 @@ func rewriteValueARM64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23298,10 +22254,9 @@ func rewriteValueARM64_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23319,11 +22274,9 @@ func rewriteValueARM64_OpEqB(v *Value) bool { v.reset(OpARM64XOR) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -23338,8 +22291,7 @@ func rewriteValueARM64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23355,9 +22307,7 @@ func rewriteValueARM64_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpARM64FMADDD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -23372,8 +22322,7 @@ func rewriteValueARM64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23389,8 +22338,7 @@ func rewriteValueARM64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23406,8 +22354,7 @@ func rewriteValueARM64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23423,8 +22370,7 @@ func rewriteValueARM64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23442,8 +22388,7 @@ func rewriteValueARM64_OpHmul32(v *Value) bool { v.reset(OpARM64SRAconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23461,8 +22406,7 @@ func rewriteValueARM64_OpHmul32u(v *Value) bool { v.reset(OpARM64SRAconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23478,8 +22422,7 @@ func rewriteValueARM64_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -23510,8 +22453,7 @@ func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -23530,10 +22472,9 @@ func rewriteValueARM64_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) 
v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23552,10 +22493,9 @@ func rewriteValueARM64_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23571,8 +22511,7 @@ func rewriteValueARM64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23588,8 +22527,7 @@ func rewriteValueARM64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23605,8 +22543,7 @@ func rewriteValueARM64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23622,8 +22559,7 @@ func rewriteValueARM64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23639,8 +22575,7 @@ func rewriteValueARM64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23656,8 +22591,7 @@ func rewriteValueARM64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23676,10 +22610,9 @@ func rewriteValueARM64_OpLeq8(v 
*Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23698,10 +22631,9 @@ func rewriteValueARM64_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23720,10 +22652,9 @@ func rewriteValueARM64_OpLess16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23742,10 +22673,9 @@ func rewriteValueARM64_OpLess16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23761,8 +22691,7 @@ func rewriteValueARM64_OpLess32(v *Value) bool { y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23778,8 +22707,7 @@ func rewriteValueARM64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpARM64LessThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23795,8 +22723,7 @@ func rewriteValueARM64_OpLess32U(v *Value) bool { y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
v.AddArg(v0) return true } @@ -23812,8 +22739,7 @@ func rewriteValueARM64_OpLess64(v *Value) bool { y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23829,8 +22755,7 @@ func rewriteValueARM64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpARM64LessThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23846,8 +22771,7 @@ func rewriteValueARM64_OpLess64U(v *Value) bool { y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23866,10 +22790,9 @@ func rewriteValueARM64_OpLess8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23888,10 +22811,9 @@ func rewriteValueARM64_OpLess8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23910,8 +22832,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23925,8 +22846,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23940,8 +22860,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // 
match: (Load ptr mem) @@ -23955,8 +22874,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23970,8 +22888,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23985,8 +22902,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24000,8 +22916,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24015,8 +22930,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24030,8 +22944,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64FMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24045,8 +22958,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -24078,20 +22990,17 @@ func rewriteValueARM64_OpLsh16x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24109,20 +23018,17 @@ func 
rewriteValueARM64_OpLsh16x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24139,16 +23045,13 @@ func rewriteValueARM64_OpLsh16x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24166,20 +23069,17 @@ func rewriteValueARM64_OpLsh16x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24197,20 +23097,17 @@ func rewriteValueARM64_OpLsh32x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := 
b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24228,20 +23125,17 @@ func rewriteValueARM64_OpLsh32x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24258,16 +23152,13 @@ func rewriteValueARM64_OpLsh32x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24285,20 +23176,17 @@ func rewriteValueARM64_OpLsh32x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24316,20 +23204,17 @@ func rewriteValueARM64_OpLsh64x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + 
v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24347,20 +23232,17 @@ func rewriteValueARM64_OpLsh64x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24377,16 +23259,13 @@ func rewriteValueARM64_OpLsh64x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24404,20 +23283,17 @@ func rewriteValueARM64_OpLsh64x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24435,20 +23311,17 @@ func rewriteValueARM64_OpLsh8x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU 
v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24466,20 +23339,17 @@ func rewriteValueARM64_OpLsh8x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24496,16 +23366,13 @@ func rewriteValueARM64_OpLsh8x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24523,20 +23390,17 @@ func rewriteValueARM64_OpLsh8x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + 
v.AddArg3(v0, v2, v3) return true } } @@ -24553,10 +23417,9 @@ func rewriteValueARM64_OpMod16(v *Value) bool { v.reset(OpARM64MODW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24573,10 +23436,9 @@ func rewriteValueARM64_OpMod16u(v *Value) bool { v.reset(OpARM64UMODW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24593,10 +23455,9 @@ func rewriteValueARM64_OpMod8(v *Value) bool { v.reset(OpARM64MODW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24613,10 +23474,9 @@ func rewriteValueARM64_OpMod8u(v *Value) bool { v.reset(OpARM64UMODW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24634,9 +23494,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -24649,12 +23507,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -24667,12 +23522,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - 
v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -24685,12 +23537,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -24703,12 +23552,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [3] dst src mem) @@ -24722,20 +23568,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -24749,20 +23589,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, 
v0, v1) return true } // match: (Move [6] dst src mem) @@ -24776,20 +23610,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -24803,29 +23631,20 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] dst src mem) @@ -24839,20 +23658,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) 
- v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [16] dst src mem) @@ -24866,20 +23679,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [24] dst src mem) @@ -24893,29 +23700,20 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = 16 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -24934,17 +23732,13 @@ func rewriteValueARM64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s - s%8 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s - s%8 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = s - 
s%8 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -24960,18 +23754,13 @@ func rewriteValueARM64_OpMove(v *Value) bool { } v.reset(OpARM64MOVDstore) v.AuxInt = s - 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem) v1.AuxInt = 8 * (64 - (s-8)/16) - v1.AddArg(dst) - v1.AddArg(src) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, src, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -24987,9 +23776,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { } v.reset(OpARM64DUFFCOPY) v.AuxInt = 8 * (64 - s/16) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -25004,13 +23791,10 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } v.reset(OpARM64LoweredMove) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type) v0.AuxInt = s - 8 v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -25029,10 +23813,9 @@ func rewriteValueARM64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -25048,8 +23831,7 @@ func rewriteValueARM64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25065,8 +23847,7 @@ func rewriteValueARM64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25082,8 +23863,7 @@ func rewriteValueARM64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25099,8 +23879,7 @@ func rewriteValueARM64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25119,10 +23898,9 @@ func rewriteValueARM64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -25138,8 +23916,7 @@ func rewriteValueARM64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25155,8 +23932,7 @@ func rewriteValueARM64_OpNot(v *Value) bool { v.reset(OpARM64XOR) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -25203,9 +23979,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { } v.reset(OpARM64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -25221,9 +23995,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { } v.reset(OpARM64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -25239,9 +24011,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { } v.reset(OpARM64LoweredPanicBoundsC) v.AuxInt = 
kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -25329,17 +24099,14 @@ func rewriteValueARM64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -25354,10 +24121,9 @@ func rewriteValueARM64_OpRotateLeft32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -25371,10 +24137,9 @@ func rewriteValueARM64_OpRotateLeft64(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -25394,17 +24159,14 @@ func rewriteValueARM64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -25425,20 +24187,17 @@ func rewriteValueARM64_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - 
v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25458,20 +24217,17 @@ func rewriteValueARM64_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25491,16 +24247,13 @@ func rewriteValueARM64_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -25520,20 +24273,17 @@ func rewriteValueARM64_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25550,22 +24300,19 @@ func rewriteValueARM64_OpRsh16x16(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, 
OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25582,22 +24329,19 @@ func rewriteValueARM64_OpRsh16x32(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25614,18 +24358,15 @@ func rewriteValueARM64_OpRsh16x64(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -25642,22 +24383,19 @@ func rewriteValueARM64_OpRsh16x8(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt8to64, 
typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25677,20 +24415,17 @@ func rewriteValueARM64_OpRsh32Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25710,20 +24445,17 @@ func rewriteValueARM64_OpRsh32Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25743,16 +24475,13 @@ func rewriteValueARM64_OpRsh32Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, 
v3) return true } } @@ -25772,20 +24501,17 @@ func rewriteValueARM64_OpRsh32Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25802,22 +24528,19 @@ func rewriteValueARM64_OpRsh32x16(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25834,22 +24557,19 @@ func rewriteValueARM64_OpRsh32x32(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25866,18 +24586,15 @@ func rewriteValueARM64_OpRsh32x64(v *Value) bool 
{ v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -25894,22 +24611,19 @@ func rewriteValueARM64_OpRsh32x8(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25927,20 +24641,17 @@ func rewriteValueARM64_OpRsh64Ux16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -25958,20 +24669,17 @@ func rewriteValueARM64_OpRsh64Ux32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, 
OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -25988,16 +24696,13 @@ func rewriteValueARM64_OpRsh64Ux64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -26015,20 +24720,17 @@ func rewriteValueARM64_OpRsh64Ux8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -26043,22 +24745,19 @@ func rewriteValueARM64_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -26073,22 +24772,19 @@ func rewriteValueARM64_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 
:= b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -26102,18 +24798,15 @@ func rewriteValueARM64_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -26128,22 +24821,19 @@ func rewriteValueARM64_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -26163,20 +24853,17 @@ func rewriteValueARM64_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 
64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -26196,20 +24883,17 @@ func rewriteValueARM64_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -26229,16 +24913,13 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -26258,20 +24939,17 @@ func rewriteValueARM64_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -26288,22 +24966,19 @@ func rewriteValueARM64_OpRsh8x16(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, 
OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -26320,22 +24995,19 @@ func rewriteValueARM64_OpRsh8x32(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -26352,18 +25024,15 @@ func rewriteValueARM64_OpRsh8x64(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -26380,22 +25049,19 @@ func rewriteValueARM64_OpRsh8x8(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) 
v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -26415,14 +25081,12 @@ func rewriteValueARM64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AuxInt = -1 v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -26438,13 +25102,11 @@ func rewriteValueARM64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AddArg(bo) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -26467,14 +25129,12 @@ func rewriteValueARM64_OpSelect1(v *Value) bool { v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v3.AuxInt = -1 v3.AddArg(c) v2.AddArg(v3) - v1.AddArg(v2) + v1.AddArg3(x, y, v2) v0.AddArg(v1) v.AddArg(v0) return true @@ -26493,13 +25153,11 @@ func rewriteValueARM64_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, 
types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v4.AddArg(bo) v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -26539,9 +25197,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26556,9 +25212,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26573,9 +25227,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26590,9 +25242,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26607,9 +25257,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64FMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26624,9 +25272,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -26644,9 +25290,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) @@ -26658,11 +25302,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 
v.reset(OpARM64MOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -26674,11 +25316,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] ptr mem) @@ -26690,11 +25330,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] ptr mem) @@ -26706,11 +25344,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [3] ptr mem) @@ -26723,17 +25359,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [5] ptr mem) @@ -26746,17 +25378,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt 
= 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] ptr mem) @@ -26769,17 +25397,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [7] ptr mem) @@ -26792,24 +25416,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 6 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [9] ptr mem) @@ -26822,17 +25440,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [10] ptr mem) @@ -26845,17 +25459,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 
v.reset(OpARM64MOVHstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [11] ptr mem) @@ -26868,24 +25478,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 10 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] ptr mem) @@ -26898,17 +25502,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVWstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [13] ptr mem) @@ -26921,24 +25521,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 12 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := 
b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [14] ptr mem) @@ -26951,24 +25545,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 12 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [15] ptr mem) @@ -26981,31 +25569,23 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 14 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 12 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v3.AuxInt = 8 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + 
v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] ptr mem) @@ -27018,14 +25598,11 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 0 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(ptr, v0, v1, mem) return true } // match: (Zero [32] ptr mem) @@ -27038,24 +25615,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 16 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(ptr) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v2.AddArg(v4) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg4(ptr, v3, v4, mem) + v.AddArg4(ptr, v0, v1, v2) return true } // match: (Zero [48] ptr mem) @@ -27068,34 +25639,25 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 32 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = 16 - v2.AddArg(ptr) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v7.AuxInt = 0 - v5.AddArg(v7) - 
v5.AddArg(mem) - v2.AddArg(v5) - v.AddArg(v2) + v5.AddArg4(ptr, v6, v7, mem) + v2.AddArg4(ptr, v3, v4, v5) + v.AddArg4(ptr, v0, v1, v2) return true } // match: (Zero [64] ptr mem) @@ -27108,44 +25670,32 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 48 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = 32 - v2.AddArg(ptr) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v5.AuxInt = 16 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v7.AuxInt = 0 - v5.AddArg(v7) v8 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v8.AuxInt = 0 - v8.AddArg(ptr) v9 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v9.AuxInt = 0 - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v10.AuxInt = 0 - v8.AddArg(v10) - v8.AddArg(mem) - v5.AddArg(v8) - v2.AddArg(v5) - v.AddArg(v2) + v8.AddArg4(ptr, v9, v10, mem) + v5.AddArg4(ptr, v6, v7, v8) + v2.AddArg4(ptr, v3, v4, v5) + v.AddArg4(ptr, v0, v1, v2) return true } // match: (Zero [s] ptr mem) @@ -27163,12 +25713,10 @@ func rewriteValueARM64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) v0.AuxInt = s - 8 v0.AddArg(ptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = s - s%16 - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(ptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [s] ptr mem) @@ -27186,12 +25734,10 @@ func rewriteValueARM64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) v0.AuxInt = s - 16 v0.AddArg(ptr) - 
v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = s - s%16 - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(ptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [s] ptr mem) @@ -27206,8 +25752,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { } v.reset(OpARM64DUFFZERO) v.AuxInt = 4 * (64 - s/16) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Zero [s] ptr mem) @@ -27221,12 +25766,10 @@ func rewriteValueARM64_OpZero(v *Value) bool { break } v.reset(OpARM64LoweredZero) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) v0.AuxInt = s - 16 v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -27251,11 +25794,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(AND x y)) yes no) @@ -27279,11 +25821,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -27309,11 +25849,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -27335,11 +25873,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes 
no) @@ -27359,11 +25896,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -27383,11 +25919,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(ADD x y)) yes no) @@ -27411,11 +25946,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -27441,11 +25974,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -27465,11 +25996,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPW x z:(NEG y)) yes no) @@ -27487,11 +26016,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] x) yes no) @@ -27502,8 +26029,7 @@ func rewriteBlockARM64(b *Block) 
bool { break } x := v_0.Args[0] - b.Reset(BlockARM64Z) - b.AddControl(x) + b.resetWithControl(BlockARM64Z, x) return true } // match: (EQ (CMPWconst [0] x) yes no) @@ -27514,8 +26040,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64ZW) - b.AddControl(x) + b.resetWithControl(BlockARM64ZW, x) return true } // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no) @@ -27536,14 +26061,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no) @@ -27564,14 +26086,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -27592,14 +26111,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -27620,14 +26136,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - 
v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (TSTconst [c] x) yes no) @@ -27640,8 +26153,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(c) return true } @@ -27655,8 +26167,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(int64(uint32(c))) return true } @@ -27699,8 +26210,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockARM64EQ, cmp) return true } case BlockARM64FGE: @@ -27709,8 +26219,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FLE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FLE, cmp) return true } case BlockARM64FGT: @@ -27719,8 +26228,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FLT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FLT, cmp) return true } case BlockARM64FLE: @@ -27729,8 +26237,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FGE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FGE, cmp) return true } case BlockARM64FLT: @@ -27739,8 +26246,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FGT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FGT, cmp) return true } case BlockARM64GE: @@ -27761,11 +26267,10 @@ func 
rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(AND x y)) yes no) @@ -27789,11 +26294,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -27819,11 +26322,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -27845,11 +26346,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -27869,11 +26369,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -27893,11 +26392,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(ADD x y)) yes no) @@ -27921,11 +26419,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - 
b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -27951,11 +26447,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -27975,11 +26469,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPW x z:(NEG y)) yes no) @@ -27997,11 +26489,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(MADD a x y)) yes no) @@ -28022,14 +26512,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no) @@ -28050,14 +26537,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + 
b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -28078,14 +26562,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -28106,14 +26587,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] x) yes no) @@ -28124,8 +26602,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = int64(31) return true } @@ -28137,8 +26614,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = int64(63) return true } @@ -28179,8 +26655,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64LE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64LE, cmp) return true } case BlockARM64GT: @@ -28201,11 +26676,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(AND x y)) yes no) @@ 
-28229,11 +26703,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -28259,11 +26731,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -28285,11 +26755,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -28309,11 +26778,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -28333,11 +26801,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(ADD x y)) yes no) @@ -28361,11 +26828,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -28391,11 +26856,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } 
- b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -28415,11 +26878,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPW x z:(NEG y)) yes no) @@ -28437,11 +26898,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(MADD a x y)) yes no) @@ -28462,14 +26921,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no) @@ -28490,14 +26946,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -28518,14 +26971,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, 
OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -28546,14 +26996,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (FlagEQ) yes no) @@ -28594,8 +27041,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64LT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64LT, cmp) return true } case BlockIf: @@ -28604,8 +27050,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64EQ) - b.AddControl(cc) + b.resetWithControl(BlockARM64EQ, cc) return true } // match: (If (NotEqual cc) yes no) @@ -28613,8 +27058,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64NE) - b.AddControl(cc) + b.resetWithControl(BlockARM64NE, cc) return true } // match: (If (LessThan cc) yes no) @@ -28622,8 +27066,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LT) - b.AddControl(cc) + b.resetWithControl(BlockARM64LT, cc) return true } // match: (If (LessThanU cc) yes no) @@ -28631,8 +27074,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULT) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULT, cc) return true } // 
match: (If (LessEqual cc) yes no) @@ -28640,8 +27082,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LE) - b.AddControl(cc) + b.resetWithControl(BlockARM64LE, cc) return true } // match: (If (LessEqualU cc) yes no) @@ -28649,8 +27090,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULE) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULE, cc) return true } // match: (If (GreaterThan cc) yes no) @@ -28658,8 +27098,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GT) - b.AddControl(cc) + b.resetWithControl(BlockARM64GT, cc) return true } // match: (If (GreaterThanU cc) yes no) @@ -28667,8 +27106,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGT, cc) return true } // match: (If (GreaterEqual cc) yes no) @@ -28676,8 +27114,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GE) - b.AddControl(cc) + b.resetWithControl(BlockARM64GE, cc) return true } // match: (If (GreaterEqualU cc) yes no) @@ -28685,8 +27122,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGE, cc) return true } // match: (If (LessThanF cc) yes no) @@ -28694,8 +27130,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLT, cc) return true } // match: (If 
(LessEqualF cc) yes no) @@ -28703,8 +27138,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLE, cc) return true } // match: (If (GreaterThanF cc) yes no) @@ -28712,8 +27146,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGT, cc) return true } // match: (If (GreaterEqualF cc) yes no) @@ -28721,16 +27154,14 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGE, cc) return true } // match: (If cond yes no) // result: (NZ cond yes no) for { cond := b.Controls[0] - b.Reset(BlockARM64NZ) - b.AddControl(cond) + b.resetWithControl(BlockARM64NZ, cond) return true } case BlockARM64LE: @@ -28751,11 +27182,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(AND x y)) yes no) @@ -28779,11 +27209,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -28809,11 +27237,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -28835,11 
+27261,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -28859,11 +27284,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -28883,11 +27307,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(ADD x y)) yes no) @@ -28911,11 +27334,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -28941,11 +27362,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -28965,11 +27384,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPW x z:(NEG y)) yes no) @@ -28987,11 +27404,9 @@ func rewriteBlockARM64(b *Block) bool { if 
!(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(MADD a x y)) yes no) @@ -29012,14 +27427,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no) @@ -29040,14 +27452,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -29068,14 +27477,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -29096,14 +27502,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LE, 
v0) return true } // match: (LE (FlagEQ) yes no) @@ -29143,8 +27546,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64GE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64GE, cmp) return true } case BlockARM64LT: @@ -29165,11 +27567,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(AND x y)) yes no) @@ -29193,11 +27594,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -29223,11 +27622,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -29249,11 +27646,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -29273,11 +27669,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -29297,11 +27692,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 
1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(ADD x y)) yes no) @@ -29325,11 +27719,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -29355,11 +27747,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -29379,11 +27769,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPW x z:(NEG y)) yes no) @@ -29401,11 +27789,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(MADD a x y)) yes no) @@ -29426,14 +27812,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no) @@ -29454,14 
+27837,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -29482,14 +27862,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -29510,14 +27887,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] x) yes no) @@ -29528,8 +27902,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = int64(31) return true } @@ -29541,8 +27914,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = int64(63) return true } @@ -29584,8 +27956,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64GT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64GT, cmp) return true } case BlockARM64NE: @@ 
-29606,11 +27977,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] z:(AND x y)) yes no) @@ -29634,11 +28004,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -29664,11 +28032,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -29690,11 +28056,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -29714,11 +28079,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -29738,11 +28102,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] z:(ADD x y)) yes no) @@ -29766,11 +28129,9 @@ func rewriteBlockARM64(b *Block) bool { if 
!(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -29796,11 +28157,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -29820,11 +28179,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPW x z:(NEG y)) yes no) @@ -29842,11 +28199,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] x) yes no) @@ -29857,8 +28212,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64NZ) - b.AddControl(x) + b.resetWithControl(BlockARM64NZ, x) return true } // match: (NE (CMPWconst [0] x) yes no) @@ -29869,8 +28223,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64NZW) - b.AddControl(x) + b.resetWithControl(BlockARM64NZW, x) return true } // match: (NE (CMPconst [0] z:(MADD a x y)) yes no) @@ -29891,14 +28244,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + 
v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no) @@ -29919,14 +28269,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -29947,14 +28294,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -29975,14 +28319,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - b.AddControl(v0) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (TSTconst [c] x) yes no) @@ -29995,8 +28336,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(c) return true } @@ -30010,8 +28350,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(int64(uint32(c))) return true } @@ -30051,8 +28390,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { 
v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64NE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64NE, cmp) return true } case BlockARM64NZ: @@ -30061,8 +28399,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64EQ) - b.AddControl(cc) + b.resetWithControl(BlockARM64EQ, cc) return true } // match: (NZ (NotEqual cc) yes no) @@ -30070,8 +28407,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64NE) - b.AddControl(cc) + b.resetWithControl(BlockARM64NE, cc) return true } // match: (NZ (LessThan cc) yes no) @@ -30079,8 +28415,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LT) - b.AddControl(cc) + b.resetWithControl(BlockARM64LT, cc) return true } // match: (NZ (LessThanU cc) yes no) @@ -30088,8 +28423,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULT) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULT, cc) return true } // match: (NZ (LessEqual cc) yes no) @@ -30097,8 +28431,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LE) - b.AddControl(cc) + b.resetWithControl(BlockARM64LE, cc) return true } // match: (NZ (LessEqualU cc) yes no) @@ -30106,8 +28439,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULE) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULE, cc) return true } // match: (NZ (GreaterThan cc) yes no) @@ -30115,8 +28447,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - 
b.Reset(BlockARM64GT) - b.AddControl(cc) + b.resetWithControl(BlockARM64GT, cc) return true } // match: (NZ (GreaterThanU cc) yes no) @@ -30124,8 +28455,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGT, cc) return true } // match: (NZ (GreaterEqual cc) yes no) @@ -30133,8 +28463,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GE) - b.AddControl(cc) + b.resetWithControl(BlockARM64GE, cc) return true } // match: (NZ (GreaterEqualU cc) yes no) @@ -30142,8 +28471,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGE, cc) return true } // match: (NZ (LessThanF cc) yes no) @@ -30151,8 +28479,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLT, cc) return true } // match: (NZ (LessEqualF cc) yes no) @@ -30160,8 +28487,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLE, cc) return true } // match: (NZ (GreaterThanF cc) yes no) @@ -30169,8 +28495,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGT, cc) return true } // match: (NZ (GreaterEqualF cc) yes no) @@ -30178,8 +28503,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - 
b.Reset(BlockARM64FGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGE, cc) return true } // match: (NZ (ANDconst [c] x) yes no) @@ -30192,8 +28516,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(c) return true } @@ -30231,8 +28554,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(int64(uint32(c))) return true } @@ -30299,8 +28621,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64ULE, cmp) return true } case BlockARM64UGT: @@ -30342,8 +28663,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64ULT, cmp) return true } case BlockARM64ULE: @@ -30384,8 +28704,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64UGE, cmp) return true } case BlockARM64ULT: @@ -30427,8 +28746,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64UGT, cmp) return true } case BlockARM64Z: @@ -30442,8 +28760,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(c) return true } @@ -30481,8 +28798,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + 
b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(int64(uint32(c))) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index f4d774c96f..5815874db9 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -129,6 +129,9 @@ func rewriteValueMIPS(v *Value) bool { case OpCvt64Fto32F: v.Op = OpMIPSMOVDF return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueMIPS_OpDiv16(v) case OpDiv16u: @@ -593,11 +596,9 @@ func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool { y := v_1 c := v_2 v.reset(OpMIPSADD) - v.AddArg(c) v0 := b.NewValue0(v.Pos, OpMIPSADD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg2(c, v0) return true } } @@ -622,39 +623,33 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32) v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(val) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v5.AuxInt = 3 v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) v6.AuxInt = 3 v6.AddArg(ptr) v5.AddArg(v6) - v3.AddArg(v5) - v2.AddArg(v3) + v3.AddArg2(v4, v5) v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32) v7.AuxInt = 0 v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v9.AuxInt = 0xff - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v10.AuxInt = 3 v11 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) v11.AuxInt = 3 v11.AddArg(ptr) v10.AddArg(v11) - v8.AddArg(v10) + v8.AddArg2(v9, v10) v7.AddArg(v8) - v2.AddArg(v7) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v7) + v.AddArg3(v0, v2, mem) return 
true } // match: (AtomicAnd8 ptr val mem) @@ -671,14 +666,11 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32) v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(val) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v5.AuxInt = 3 v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) @@ -688,14 +680,12 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v7.AddArg(ptr) v6.AddArg(v7) v5.AddArg(v6) - v3.AddArg(v5) - v2.AddArg(v3) + v3.AddArg2(v4, v5) v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32) v8.AuxInt = 0 v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v10.AuxInt = 0xff - v9.AddArg(v10) v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v11.AuxInt = 3 v12 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) @@ -705,11 +695,10 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v13.AddArg(ptr) v12.AddArg(v13) v11.AddArg(v12) - v9.AddArg(v11) + v9.AddArg2(v10, v11) v8.AddArg(v9) - v2.AddArg(v8) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v8) + v.AddArg3(v0, v2, mem) return true } return false @@ -735,22 +724,18 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v3.AddArg(val) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v4.AuxInt = 3 v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) v5.AuxInt = 3 v5.AddArg(ptr) v4.AddArg(v5) - v2.AddArg(v4) - v.AddArg(v2) - 
v.AddArg(mem) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) return true } // match: (AtomicOr8 ptr val mem) @@ -767,13 +752,10 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v3.AddArg(val) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v4.AuxInt = 3 v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) @@ -783,9 +765,8 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { v6.AddArg(ptr) v5.AddArg(v6) v4.AddArg(v5) - v2.AddArg(v4) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) return true } return false @@ -804,11 +785,9 @@ func rewriteValueMIPS_OpAvg32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpMIPSSUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -824,10 +803,9 @@ func rewriteValueMIPS_OpBitLen32(v *Value) bool { v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -888,18 +866,16 @@ func rewriteValueMIPS_OpCtz32(v *Value) bool { v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t) v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t) v2.AuxInt = 1 v3 := b.NewValue0(v.Pos, OpMIPSAND, t) - v3.AddArg(x) v4 := b.NewValue0(v.Pos, OpMIPSNEG, t) v4.AddArg(x) - v3.AddArg(v4) + v3.AddArg2(x, v4) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -917,10 +893,9 @@ func rewriteValueMIPS_OpDiv16(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -939,10 +914,9 @@ func rewriteValueMIPS_OpDiv16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -959,8 +933,7 @@ func rewriteValueMIPS_OpDiv32(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -977,8 +950,7 @@ func rewriteValueMIPS_OpDiv32u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -997,10 +969,9 @@ func rewriteValueMIPS_OpDiv8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1019,10 +990,9 @@ func rewriteValueMIPS_OpDiv8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1042,10 +1012,9 @@ func rewriteValueMIPS_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1063,8 +1032,7 @@ func rewriteValueMIPS_OpEq32(v *Value) bool { v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1080,8 +1048,7 @@ func rewriteValueMIPS_OpEq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1097,8 +1064,7 @@ func rewriteValueMIPS_OpEq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1118,10 +1084,9 @@ func rewriteValueMIPS_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1139,8 +1104,7 @@ func rewriteValueMIPS_OpEqB(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1158,8 +1122,7 @@ func rewriteValueMIPS_OpEqPtr(v *Value) bool { v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1175,8 +1138,7 @@ func rewriteValueMIPS_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1192,8 +1154,7 @@ func rewriteValueMIPS_OpGeq64F(v *Value) bool { y := v_1 
v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1209,8 +1170,7 @@ func rewriteValueMIPS_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1226,8 +1186,7 @@ func rewriteValueMIPS_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1244,8 +1203,7 @@ func rewriteValueMIPS_OpHmul32(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1262,8 +1220,7 @@ func rewriteValueMIPS_OpHmul32u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1277,8 +1234,7 @@ func rewriteValueMIPS_OpIsInBounds(v *Value) bool { idx := v_0 len := v_1 v.reset(OpMIPSSGTU) - v.AddArg(len) - v.AddArg(idx) + v.AddArg2(len, idx) return true } } @@ -1291,10 +1247,9 @@ func rewriteValueMIPS_OpIsNonNil(v *Value) bool { for { ptr := v_0 v.reset(OpMIPSSGTU) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) + v.AddArg2(ptr, v0) return true } } @@ -1311,8 +1266,7 @@ func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -1332,10 +1286,9 @@ func rewriteValueMIPS_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) 
v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1355,10 +1308,9 @@ func rewriteValueMIPS_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1376,8 +1328,7 @@ func rewriteValueMIPS_OpLeq32(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1393,8 +1344,7 @@ func rewriteValueMIPS_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1412,8 +1362,7 @@ func rewriteValueMIPS_OpLeq32U(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1429,8 +1378,7 @@ func rewriteValueMIPS_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1450,10 +1398,9 @@ func rewriteValueMIPS_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1473,10 +1420,9 @@ func rewriteValueMIPS_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) 
v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1494,10 +1440,9 @@ func rewriteValueMIPS_OpLess16(v *Value) bool { v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1514,10 +1459,9 @@ func rewriteValueMIPS_OpLess16U(v *Value) bool { v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1530,8 +1474,7 @@ func rewriteValueMIPS_OpLess32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSGT) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1546,8 +1489,7 @@ func rewriteValueMIPS_OpLess32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1561,8 +1503,7 @@ func rewriteValueMIPS_OpLess32U(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSGTU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1577,8 +1518,7 @@ func rewriteValueMIPS_OpLess64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1596,10 +1536,9 @@ func rewriteValueMIPS_OpLess8(v *Value) bool { v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1616,10 +1555,9 @@ func rewriteValueMIPS_OpLess8U(v *Value) bool { v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - 
v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1637,8 +1575,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1652,8 +1589,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1667,8 +1603,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1682,8 +1617,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1697,8 +1631,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1712,8 +1645,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1727,8 +1659,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1742,8 +1673,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1774,20 +1704,17 @@ func rewriteValueMIPS_OpLsh16x16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) 
v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -1804,16 +1731,13 @@ func rewriteValueMIPS_OpLsh16x32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -1867,20 +1791,17 @@ func rewriteValueMIPS_OpLsh16x8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -1897,20 +1818,17 @@ func rewriteValueMIPS_OpLsh32x16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -1927,16 +1845,13 @@ func rewriteValueMIPS_OpLsh32x32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := 
b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -1990,20 +1905,17 @@ func rewriteValueMIPS_OpLsh32x8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -2020,20 +1932,17 @@ func rewriteValueMIPS_OpLsh8x16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -2050,16 +1959,13 @@ func rewriteValueMIPS_OpLsh8x32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2113,20 +2019,17 @@ func rewriteValueMIPS_OpLsh8x8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) 
v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -2159,8 +2062,7 @@ func rewriteValueMIPS_OpMIPSADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpMIPSSUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2192,9 +2094,7 @@ func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVWconst [d])) @@ -2266,9 +2166,7 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND (SGTUconst [1] x) (SGTUconst [1] y)) @@ -2286,8 +2184,7 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool { v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2314,9 +2211,7 @@ func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVWconst [d])) @@ -2358,9 +2253,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { if v_2.Op != OpMIPSMOVWconst || v_2.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = f.Type - v.AddArg(f) + v.copyOf(f) return true } // match: (CMOVZ a _ (MOVWconst [c])) @@ -2375,9 +2268,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { if !(c != 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (CMOVZ a (MOVWconst [0]) c) @@ -2389,8 +2280,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { } c := v_2 v.reset(OpMIPSCMOVZzero) - v.AddArg(a) - v.AddArg(c) + v.AddArg2(a, c) return true } return false @@ -2420,9 +2310,7 @@ func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool { 
if !(c != 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } return false @@ -2446,8 +2334,7 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool { } v.reset(OpMIPSLoweredAtomicAddconst) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2465,8 +2352,7 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool { } mem := v_2 v.reset(OpMIPSLoweredAtomicStorezero) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2493,8 +2379,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { v.reset(OpMIPSMOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -2516,8 +2401,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { v.reset(OpMIPSMOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -2532,9 +2416,8 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -2554,7 +2437,6 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { if x.Op != OpMIPSMOVBUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -2588,12 +2470,10 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBUreg (ANDconst [c] x)) @@ -2644,8 +2524,7 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { v.reset(OpMIPSMOVBload) v.AuxInt = off1 + 
off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -2667,8 +2546,7 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { v.reset(OpMIPSMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -2683,9 +2561,8 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -2705,7 +2582,6 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { if x.Op != OpMIPSMOVBload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -2739,12 +2615,10 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBreg (ANDconst [c] x)) @@ -2801,9 +2675,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -2826,9 +2698,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) @@ -2844,8 +2714,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } 
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -2862,9 +2731,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -2881,9 +2748,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -2900,9 +2765,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -2919,9 +2782,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -2938,9 +2799,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -2967,8 +2826,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool { v.reset(OpMIPSMOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -2990,8 +2848,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool { v.reset(OpMIPSMOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3018,8 +2875,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { v.reset(OpMIPSMOVDload) v.AuxInt = off1 
+ off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3041,8 +2897,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { v.reset(OpMIPSMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) @@ -3057,15 +2912,12 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -3094,9 +2946,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool { v.reset(OpMIPSMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3119,9 +2969,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool { v.reset(OpMIPSMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3148,8 +2996,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { v.reset(OpMIPSMOVFload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3171,8 +3018,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { v.reset(OpMIPSMOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) @@ -3187,15 +3033,12 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { } 
off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -3224,9 +3067,7 @@ func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool { v.reset(OpMIPSMOVFstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3249,9 +3090,7 @@ func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool { v.reset(OpMIPSMOVFstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3278,8 +3117,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { v.reset(OpMIPSMOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3301,8 +3139,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { v.reset(OpMIPSMOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -3317,9 +3154,8 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -3339,7 +3175,6 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { if x.Op != OpMIPSMOVBUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3351,7 +3186,6 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { if x.Op != OpMIPSMOVHUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) 
return true @@ -3396,12 +3230,10 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVHUreg (ANDconst [c] x)) @@ -3452,8 +3284,7 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { v.reset(OpMIPSMOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3475,8 +3306,7 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { v.reset(OpMIPSMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -3491,9 +3321,8 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -3513,7 +3342,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { if x.Op != OpMIPSMOVBload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3525,7 +3353,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { if x.Op != OpMIPSMOVBUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3537,7 +3364,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { if x.Op != OpMIPSMOVHload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3593,12 +3419,10 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVHreg (ANDconst [c] x)) 
@@ -3655,9 +3479,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3680,9 +3502,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) @@ -3698,8 +3518,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -3716,9 +3535,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -3735,9 +3552,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -3754,9 +3569,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -3783,8 +3596,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool { v.reset(OpMIPSMOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3806,8 +3618,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool { 
v.reset(OpMIPSMOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3834,8 +3645,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { v.reset(OpMIPSMOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3857,8 +3667,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { v.reset(OpMIPSMOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -3873,15 +3682,12 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -3937,9 +3743,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3962,9 +3766,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) @@ -3980,8 +3782,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -3998,9 +3799,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v 
*Value) bool { v.reset(OpMIPSMOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -4027,8 +3826,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool { v.reset(OpMIPSMOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -4050,8 +3848,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool { v.reset(OpMIPSMOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4080,9 +3877,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -4220,9 +4015,7 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (OR (SGTUzero x) (SGTUzero y)) @@ -4239,8 +4032,7 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool { y := v_1.Args[0] v.reset(OpMIPSSGTUzero) v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4257,9 +4049,7 @@ func rewriteValueMIPS_OpMIPSORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -4880,9 +4670,7 @@ func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBconst [c] (MOVWconst [d])) @@ -4968,9 +4756,7 @@ func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [-1] x) @@ -5027,10 +4813,9 @@ func rewriteValueMIPS_OpMod16(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5049,10 +4834,9 @@ func rewriteValueMIPS_OpMod16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5069,8 +4853,7 @@ func rewriteValueMIPS_OpMod32(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5087,8 +4870,7 @@ func rewriteValueMIPS_OpMod32u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5107,10 +4889,9 @@ func rewriteValueMIPS_OpMod8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5129,10 +4910,9 @@ func rewriteValueMIPS_OpMod8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5151,9 +4931,7 @@ func rewriteValueMIPS_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + 
v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -5166,12 +4944,9 @@ func rewriteValueMIPS_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpMIPSMOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] {t} dst src mem) @@ -5189,12 +4964,9 @@ func rewriteValueMIPS_OpMove(v *Value) bool { break } v.reset(OpMIPSMOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -5208,20 +4980,14 @@ func rewriteValueMIPS_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 1 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] {t} dst src mem) @@ -5239,12 +5005,9 @@ func rewriteValueMIPS_OpMove(v *Value) bool { break } v.reset(OpMIPSMOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] {t} dst src mem) @@ -5263,20 +5026,14 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, 
types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] dst src mem) @@ -5290,38 +5047,26 @@ func rewriteValueMIPS_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -5335,29 +5080,20 @@ func rewriteValueMIPS_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - 
v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5376,20 +5112,14 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5408,38 +5138,26 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v3.AuxInt = 2 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v4.AuxInt = 2 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, 
v1) return true } // match: (Move [6] {t} dst src mem) @@ -5458,29 +5176,20 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] {t} dst src mem) @@ -5499,29 +5208,20 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [16] {t} dst src mem) @@ -5540,38 +5240,26 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 12 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v0.AuxInt = 12 - 
v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v3.AuxInt = 4 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v4.AuxInt = 4 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] {t} dst src mem) @@ -5588,13 +5276,10 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSLoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -5613,14 +5298,12 @@ func rewriteValueMIPS_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -5636,12 +5319,10 @@ func rewriteValueMIPS_OpNeq32(v *Value) bool { y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := 
b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -5656,8 +5337,7 @@ func rewriteValueMIPS_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5673,8 +5353,7 @@ func rewriteValueMIPS_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5693,14 +5372,12 @@ func rewriteValueMIPS_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -5716,12 +5393,10 @@ func rewriteValueMIPS_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -5780,9 +5455,7 @@ func rewriteValueMIPS_OpPanicBounds(v *Value) bool { } v.reset(OpMIPSLoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -5798,9 +5471,7 @@ func rewriteValueMIPS_OpPanicBounds(v *Value) bool { } v.reset(OpMIPSLoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -5816,9 +5487,7 @@ func rewriteValueMIPS_OpPanicBounds(v *Value) bool { } v.reset(OpMIPSLoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - 
v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -5842,10 +5511,7 @@ func rewriteValueMIPS_OpPanicExtend(v *Value) bool { } v.reset(OpMIPSLoweredPanicExtendA) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -5862,10 +5528,7 @@ func rewriteValueMIPS_OpPanicExtend(v *Value) bool { } v.reset(OpMIPSLoweredPanicExtendB) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -5882,10 +5545,7 @@ func rewriteValueMIPS_OpPanicExtend(v *Value) bool { } v.reset(OpMIPSLoweredPanicExtendC) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } return false @@ -5906,17 +5566,14 @@ func rewriteValueMIPS_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -5937,17 +5594,14 @@ func rewriteValueMIPS_OpRotateLeft32(v *Value) bool { c := v_1.AuxInt v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 31 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 31 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -5968,17 +5622,14 @@ func rewriteValueMIPS_OpRotateLeft64(v *Value) bool { c := v_1.AuxInt 
v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 63 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 63 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -5999,17 +5650,14 @@ func rewriteValueMIPS_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6029,20 +5677,17 @@ func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6061,16 +5706,13 @@ func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } 
} @@ -6131,20 +5773,17 @@ func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6161,21 +5800,18 @@ func rewriteValueMIPS_OpRsh16x16(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6192,17 +5828,14 @@ func rewriteValueMIPS_OpRsh16x32(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -6266,21 +5899,18 @@ func rewriteValueMIPS_OpRsh16x8(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, 
typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6297,20 +5927,17 @@ func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6327,16 +5954,13 @@ func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -6390,20 +6014,17 @@ func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6418,21 +6039,18 @@ func rewriteValueMIPS_OpRsh32x16(v *Value) bool { x := v_0 y 
:= v_1 v.reset(OpMIPSSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -6447,17 +6065,14 @@ func rewriteValueMIPS_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = -1 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -6511,21 +6126,18 @@ func rewriteValueMIPS_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -6544,20 +6156,17 @@ func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := 
b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6576,16 +6185,13 @@ func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6646,20 +6252,17 @@ func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6676,21 +6279,18 @@ func rewriteValueMIPS_OpRsh8x16(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6707,17 +6307,14 @@ func rewriteValueMIPS_OpRsh8x32(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -6781,21 +6378,18 @@ func rewriteValueMIPS_OpRsh8x8(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6814,8 +6408,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { x := v_0.Args[0] v.reset(OpMIPSADD) v.Type = t.FieldType(0) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Select0 (Sub32carry x y)) @@ -6829,8 +6422,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { x := v_0.Args[0] v.reset(OpMIPSSUB) v.Type = t.FieldType(0) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Select0 (MULTU (MOVWconst [0]) _ )) @@ -6839,7 +6431,6 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { if v_0.Op != OpMIPSMULTU { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -6858,7 +6449,6 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { if v_0.Op != OpMIPSMULTU { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -6889,11 +6479,9 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) 
v0.AuxInt = -1 v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(x) + v.AddArg3(v0, v1, x) return true } break @@ -7007,11 +6595,9 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { x := v_0.Args[0] v.reset(OpMIPSSGTU) v.Type = typ.Bool - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0)) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg2(x, v0) return true } // match: (Select1 (Sub32carry x y)) @@ -7026,10 +6612,8 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.reset(OpMIPSSGTU) v.Type = typ.Bool v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0)) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(x, y) + v.AddArg2(v0, x) return true } // match: (Select1 (MULTU (MOVWconst [0]) _ )) @@ -7038,7 +6622,6 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { if v_0.Op != OpMIPSMULTU { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -7065,9 +6648,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { continue } x := v_0_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -7231,9 +6812,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7248,9 +6827,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7265,9 +6842,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7282,9 +6857,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } 
v.reset(OpMIPSMOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7299,9 +6872,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7320,10 +6891,8 @@ func rewriteValueMIPS_OpSub32withcarry(v *Value) bool { c := v_2 v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSSUB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(c) + v0.AddArg2(x, y) + v.AddArg2(v0, c) return true } } @@ -7340,9 +6909,7 @@ func rewriteValueMIPS_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) @@ -7354,11 +6921,9 @@ func rewriteValueMIPS_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpMIPSMOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) @@ -7375,11 +6940,9 @@ func rewriteValueMIPS_OpZero(v *Value) bool { break } v.reset(OpMIPSMOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -7392,18 +6955,14 @@ func rewriteValueMIPS_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 1 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) @@ -7420,11 +6979,9 @@ func rewriteValueMIPS_OpZero(v *Value) bool { break } 
v.reset(OpMIPSMOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) @@ -7442,18 +6999,14 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) @@ -7466,32 +7019,24 @@ func rewriteValueMIPS_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 3 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [3] ptr mem) @@ -7504,25 +7049,19 @@ func rewriteValueMIPS_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] {t} ptr mem) @@ -7540,25 +7079,19 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) @@ -7576,18 +7109,14 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] {t} ptr mem) @@ -7605,25 +7134,19 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) 
v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] {t} ptr mem) @@ -7641,32 +7164,24 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 12 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v3.AuxInt = 4 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) @@ -7682,12 +7197,10 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSLoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -7702,10 +7215,9 @@ func rewriteValueMIPS_OpZeromask(v *Value) bool { x := v_0 v.reset(OpMIPSNEG) v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -7718,8 +7230,7 @@ 
func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPF, cmp) return true } // match: (EQ (FPFlagFalse cmp) yes no) @@ -7727,8 +7238,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPT, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) @@ -7742,9 +7252,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGT { break } - _ = cmp.Args[1] - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -7758,9 +7266,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTU { break } - _ = cmp.Args[1] - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) @@ -7774,8 +7280,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTconst { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -7789,8 +7294,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUconst { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no) @@ -7804,8 +7308,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTzero { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no) @@ -7819,8 +7322,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUzero { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, 
cmp) return true } // match: (EQ (SGTUconst [1] x) yes no) @@ -7831,8 +7333,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSNE) - b.AddControl(x) + b.resetWithControl(BlockMIPSNE, x) return true } // match: (EQ (SGTUzero x) yes no) @@ -7840,8 +7341,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTUzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSEQ) - b.AddControl(x) + b.resetWithControl(BlockMIPSEQ, x) return true } // match: (EQ (SGTconst [0] x) yes no) @@ -7852,8 +7352,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSGEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSGEZ, x) return true } // match: (EQ (SGTzero x) yes no) @@ -7861,8 +7360,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSLEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSLEZ, x) return true } // match: (EQ (MOVWconst [0]) yes no) @@ -7945,8 +7443,7 @@ func rewriteBlockMIPS(b *Block) bool { // result: (NE cond yes no) for { cond := b.Controls[0] - b.Reset(BlockMIPSNE) - b.AddControl(cond) + b.resetWithControl(BlockMIPSNE, cond) return true } case BlockMIPSLEZ: @@ -8007,8 +7504,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPT, cmp) return true } // match: (NE (FPFlagFalse cmp) yes no) @@ -8016,8 +7512,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPF, cmp) return true } // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) @@ -8031,9 +7526,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGT { break } - _ = cmp.Args[1] - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + 
b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -8047,9 +7540,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTU { break } - _ = cmp.Args[1] - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) @@ -8063,8 +7554,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTconst { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -8078,8 +7568,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUconst { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTzero _)) yes no) @@ -8093,8 +7582,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTzero { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no) @@ -8108,8 +7596,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUzero { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (SGTUconst [1] x) yes no) @@ -8120,8 +7607,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSEQ) - b.AddControl(x) + b.resetWithControl(BlockMIPSEQ, x) return true } // match: (NE (SGTUzero x) yes no) @@ -8129,8 +7615,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTUzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSNE) - b.AddControl(x) + b.resetWithControl(BlockMIPSNE, x) return true } // match: (NE (SGTconst [0] x) yes no) @@ -8141,8 +7626,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSLTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSLTZ, x) 
return true } // match: (NE (SGTzero x) yes no) @@ -8150,8 +7634,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSGTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSGTZ, x) return true } // match: (NE (MOVWconst [0]) yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 4139361b11..125c33d002 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -154,6 +154,9 @@ func rewriteValueMIPS64(v *Value) bool { case OpCvt64to64F: v.Op = OpMIPS64MOVVD return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueMIPS64_OpDiv16(v) case OpDiv16u: @@ -703,11 +706,9 @@ func rewriteValueMIPS64_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -722,8 +723,7 @@ func rewriteValueMIPS64_OpCom16(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -738,8 +738,7 @@ func rewriteValueMIPS64_OpCom32(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -754,8 +753,7 @@ func rewriteValueMIPS64_OpCom64(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -770,8 +768,7 @@ func rewriteValueMIPS64_OpCom8(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ 
-798,10 +795,9 @@ func rewriteValueMIPS64_OpDiv16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -820,10 +816,9 @@ func rewriteValueMIPS64_OpDiv16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -842,10 +837,9 @@ func rewriteValueMIPS64_OpDiv32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -864,10 +858,9 @@ func rewriteValueMIPS64_OpDiv32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -884,8 +877,7 @@ func rewriteValueMIPS64_OpDiv64(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -902,8 +894,7 @@ func rewriteValueMIPS64_OpDiv64u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -922,10 +913,9 @@ func 
rewriteValueMIPS64_OpDiv8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -944,10 +934,9 @@ func rewriteValueMIPS64_OpDiv8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -965,15 +954,13 @@ func rewriteValueMIPS64_OpEq16(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -990,15 +977,13 @@ func rewriteValueMIPS64_OpEq32(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1013,8 +998,7 @@ func rewriteValueMIPS64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1032,11 +1016,9 @@ func rewriteValueMIPS64_OpEq64(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, 
typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1051,8 +1033,7 @@ func rewriteValueMIPS64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1070,15 +1051,13 @@ func rewriteValueMIPS64_OpEq8(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1095,11 +1074,9 @@ func rewriteValueMIPS64_OpEqB(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1116,11 +1093,9 @@ func rewriteValueMIPS64_OpEqPtr(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1135,8 +1110,7 @@ func rewriteValueMIPS64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1152,8 +1126,7 @@ func rewriteValueMIPS64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + 
v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1169,8 +1142,7 @@ func rewriteValueMIPS64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1186,8 +1158,7 @@ func rewriteValueMIPS64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1208,10 +1179,9 @@ func rewriteValueMIPS64_OpHmul32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) v.AddArg(v0) return true @@ -1233,10 +1203,9 @@ func rewriteValueMIPS64_OpHmul32u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) v.AddArg(v0) return true @@ -1254,8 +1223,7 @@ func rewriteValueMIPS64_OpHmul64(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1272,8 +1240,7 @@ func rewriteValueMIPS64_OpHmul64u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1287,8 +1254,7 @@ func rewriteValueMIPS64_OpIsInBounds(v *Value) bool { idx := v_0 len := v_1 v.reset(OpMIPS64SGTU) - v.AddArg(len) - v.AddArg(idx) + v.AddArg2(len, idx) return true } } @@ 
-1301,10 +1267,9 @@ func rewriteValueMIPS64_OpIsNonNil(v *Value) bool { for { ptr := v_0 v.reset(OpMIPS64SGTU) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) + v.AddArg2(ptr, v0) return true } } @@ -1321,11 +1286,9 @@ func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v1.AddArg(idx) - v1.AddArg(len) - v.AddArg(v1) + v1.AddArg2(idx, len) + v.AddArg2(v0, v1) return true } } @@ -1342,15 +1305,13 @@ func rewriteValueMIPS64_OpLeq16(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1367,15 +1328,13 @@ func rewriteValueMIPS64_OpLeq16U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1392,15 +1351,13 @@ func rewriteValueMIPS64_OpLeq32(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1415,8 
+1372,7 @@ func rewriteValueMIPS64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1434,15 +1390,13 @@ func rewriteValueMIPS64_OpLeq32U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1459,11 +1413,9 @@ func rewriteValueMIPS64_OpLeq64(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1478,8 +1430,7 @@ func rewriteValueMIPS64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1497,11 +1448,9 @@ func rewriteValueMIPS64_OpLeq64U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1518,15 +1467,13 @@ func rewriteValueMIPS64_OpLeq8(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) 
+ v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1543,15 +1490,13 @@ func rewriteValueMIPS64_OpLeq8U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1568,10 +1513,9 @@ func rewriteValueMIPS64_OpLess16(v *Value) bool { v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1588,10 +1532,9 @@ func rewriteValueMIPS64_OpLess16U(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1608,10 +1551,9 @@ func rewriteValueMIPS64_OpLess32(v *Value) bool { v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1626,8 +1568,7 @@ func rewriteValueMIPS64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1645,10 +1586,9 @@ func rewriteValueMIPS64_OpLess32U(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1661,8 +1601,7 @@ func rewriteValueMIPS64_OpLess64(v 
*Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SGT) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1677,8 +1616,7 @@ func rewriteValueMIPS64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1692,8 +1630,7 @@ func rewriteValueMIPS64_OpLess64U(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SGTU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1710,10 +1647,9 @@ func rewriteValueMIPS64_OpLess8(v *Value) bool { v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1730,10 +1666,9 @@ func rewriteValueMIPS64_OpLess8U(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1751,8 +1686,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1766,8 +1700,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1781,8 +1714,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1796,8 +1728,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1811,8 +1742,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } 
v.reset(OpMIPS64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1826,8 +1756,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1841,8 +1770,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1856,8 +1784,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVVload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1871,8 +1798,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1886,8 +1812,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1921,18 +1846,15 @@ func rewriteValueMIPS64_OpLsh16x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -1952,18 +1874,15 @@ func rewriteValueMIPS64_OpLsh16x32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 
:= b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -1983,14 +1902,11 @@ func rewriteValueMIPS64_OpLsh16x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2010,18 +1926,15 @@ func rewriteValueMIPS64_OpLsh16x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2041,18 +1954,15 @@ func rewriteValueMIPS64_OpLsh32x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2072,18 +1982,15 @@ func rewriteValueMIPS64_OpLsh32x32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2103,14 +2010,11 @@ func rewriteValueMIPS64_OpLsh32x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2130,18 +2034,15 @@ func rewriteValueMIPS64_OpLsh32x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2161,18 +2062,15 @@ func rewriteValueMIPS64_OpLsh64x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2192,18 +2090,15 @@ func rewriteValueMIPS64_OpLsh64x32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, 
typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2223,14 +2118,11 @@ func rewriteValueMIPS64_OpLsh64x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2250,18 +2142,15 @@ func rewriteValueMIPS64_OpLsh64x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2281,18 +2170,15 @@ func rewriteValueMIPS64_OpLsh8x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2312,18 +2198,15 @@ func rewriteValueMIPS64_OpLsh8x32(v *Value) bool { v1 := 
b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2343,14 +2226,11 @@ func rewriteValueMIPS64_OpLsh8x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2370,18 +2250,15 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2418,8 +2295,7 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { } y := v_1.Args[0] v.reset(OpMIPS64SUBV) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2451,9 +2327,7 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDVconst [c] (MOVVconst [d])) @@ -2536,9 +2410,7 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + 
v.copyOf(x) return true } return false @@ -2562,9 +2434,7 @@ func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVVconst [d])) @@ -2614,8 +2484,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool { } v.reset(OpMIPS64LoweredAtomicAddconst32) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2639,8 +2508,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool { } v.reset(OpMIPS64LoweredAtomicAddconst64) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2658,8 +2526,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool { } mem := v_2 v.reset(OpMIPS64LoweredAtomicStorezero32) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2677,8 +2544,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool { } mem := v_2 v.reset(OpMIPS64LoweredAtomicStorezero64) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2704,8 +2570,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool { v.reset(OpMIPS64MOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -2727,8 +2592,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool { v.reset(OpMIPS64MOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2742,7 +2606,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -2792,8 +2655,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { v.reset(OpMIPS64MOVBload) v.AuxInt = off1 + off2 
v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -2815,8 +2677,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { v.reset(OpMIPS64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2830,7 +2691,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool { if x.Op != OpMIPS64MOVBload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -2882,9 +2742,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -2907,9 +2765,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -2925,8 +2781,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -2943,9 +2798,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -2962,9 +2815,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ 
-2981,9 +2832,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -3000,9 +2849,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -3019,9 +2866,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) @@ -3038,9 +2883,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -3066,8 +2909,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3089,8 +2931,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3116,8 +2957,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { v.reset(OpMIPS64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3139,8 +2979,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { v.reset(OpMIPS64MOVDload) v.AuxInt = off1 + 
off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3168,9 +3007,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { v.reset(OpMIPS64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3193,9 +3030,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { v.reset(OpMIPS64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3221,8 +3056,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { v.reset(OpMIPS64MOVFload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3244,8 +3078,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { v.reset(OpMIPS64MOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3273,9 +3106,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool { v.reset(OpMIPS64MOVFstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3298,9 +3129,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool { v.reset(OpMIPS64MOVFstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3326,8 +3155,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool { v.reset(OpMIPS64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload 
[off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3349,8 +3177,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool { v.reset(OpMIPS64MOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3364,7 +3191,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3376,7 +3202,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { if x.Op != OpMIPS64MOVHUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3437,8 +3262,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v.reset(OpMIPS64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3460,8 +3284,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v.reset(OpMIPS64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3475,7 +3298,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if x.Op != OpMIPS64MOVBload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3487,7 +3309,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3499,7 +3320,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if x.Op != OpMIPS64MOVHload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3573,9 +3393,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVVaddr 
[off2] {sym2} ptr) val mem) @@ -3598,9 +3416,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -3616,8 +3432,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -3634,9 +3449,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -3653,9 +3466,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -3672,9 +3483,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) @@ -3691,9 +3500,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -3719,8 +3526,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3742,8 +3548,7 @@ func 
rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3769,8 +3574,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v.reset(OpMIPS64MOVVload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3792,8 +3596,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v.reset(OpMIPS64MOVVload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3848,9 +3651,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v.reset(OpMIPS64MOVVstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3873,9 +3674,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v.reset(OpMIPS64MOVVstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -3891,8 +3690,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3918,8 +3716,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool { v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3941,8 +3738,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool { v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off1 + 
off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3968,8 +3764,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { v.reset(OpMIPS64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3991,8 +3786,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { v.reset(OpMIPS64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4006,7 +3800,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4018,7 +3811,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if x.Op != OpMIPS64MOVHUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4030,7 +3822,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if x.Op != OpMIPS64MOVWUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4102,8 +3893,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v.reset(OpMIPS64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -4125,8 +3915,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v.reset(OpMIPS64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4140,7 +3929,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVBload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4152,7 +3940,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { 
break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4164,7 +3951,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVHload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4176,7 +3962,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVHUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4188,7 +3973,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVWload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -4273,9 +4057,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -4298,9 +4080,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -4316,8 +4096,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -4334,9 +4113,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) @@ -4353,9 +4130,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return 
false @@ -4381,8 +4156,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool { v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -4404,8 +4178,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool { v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4496,9 +4269,7 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -4512,9 +4283,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -5073,9 +4842,7 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBVconst [c] (MOVVconst [d])) @@ -5173,9 +4940,7 @@ func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [-1] x) @@ -5236,10 +5001,9 @@ func rewriteValueMIPS64_OpMod16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5258,10 +5022,9 @@ func rewriteValueMIPS64_OpMod16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, 
typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5280,10 +5043,9 @@ func rewriteValueMIPS64_OpMod32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5302,10 +5064,9 @@ func rewriteValueMIPS64_OpMod32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5322,8 +5083,7 @@ func rewriteValueMIPS64_OpMod64(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5340,8 +5100,7 @@ func rewriteValueMIPS64_OpMod64u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5360,10 +5119,9 @@ func rewriteValueMIPS64_OpMod8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5382,10 +5140,9 @@ func rewriteValueMIPS64_OpMod8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) 
+ v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5404,9 +5161,7 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -5419,12 +5174,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] {t} dst src mem) @@ -5442,12 +5194,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } v.reset(OpMIPS64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -5461,20 +5210,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 1 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] {t} dst src mem) @@ -5492,12 +5235,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } v.reset(OpMIPS64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] {t} dst src mem) @@ -5516,20 +5256,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } 
v.reset(OpMIPS64MOVHstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] dst src mem) @@ -5543,38 +5277,26 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5592,12 +5314,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } v.reset(OpMIPS64MOVVstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // 
match: (Move [8] {t} dst src mem) @@ -5616,20 +5335,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5648,38 +5361,26 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v3.AuxInt = 2 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v4.AuxInt = 2 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -5693,29 +5394,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v0.AuxInt = 2 - 
v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] {t} dst src mem) @@ -5734,29 +5426,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] {t} dst src mem) @@ -5775,29 +5458,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) v2.AuxInt = 4 - 
v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [16] {t} dst src mem) @@ -5816,20 +5490,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [24] {t} dst src mem) @@ -5848,29 +5516,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 16 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] {t} dst src mem) @@ -5887,9 +5546,7 @@ func 
rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64DUFFCOPY) v.AuxInt = 16 * (128 - s/8) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] {t} dst src mem) @@ -5906,13 +5563,10 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64LoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -5929,8 +5583,7 @@ func rewriteValueMIPS64_OpMul16(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5947,8 +5600,7 @@ func rewriteValueMIPS64_OpMul32(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5965,8 +5617,7 @@ func rewriteValueMIPS64_OpMul64(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5983,8 +5634,7 @@ func rewriteValueMIPS64_OpMul8(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6003,14 +5653,12 @@ func rewriteValueMIPS64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, 
OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -6028,14 +5676,12 @@ func rewriteValueMIPS64_OpNeq32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -6050,8 +5696,7 @@ func rewriteValueMIPS64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6068,12 +5713,10 @@ func rewriteValueMIPS64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -6088,8 +5731,7 @@ func rewriteValueMIPS64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6108,14 +5750,12 @@ func rewriteValueMIPS64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -6131,12 +5771,10 @@ func rewriteValueMIPS64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) - 
v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -6195,9 +5833,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { } v.reset(OpMIPS64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -6213,9 +5849,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { } v.reset(OpMIPS64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -6231,9 +5865,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { } v.reset(OpMIPS64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -6254,17 +5886,14 @@ func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6285,17 +5914,14 @@ func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool { c := v_1.AuxInt v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 31 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 31 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6316,17 +5942,14 @@ func rewriteValueMIPS64_OpRotateLeft64(v 
*Value) bool { c := v_1.AuxInt v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 63 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 63 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6347,17 +5970,14 @@ func rewriteValueMIPS64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6378,20 +5998,17 @@ func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6411,20 +6028,17 @@ func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := 
b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6444,16 +6058,13 @@ func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) return true } } @@ -6473,20 +6084,17 @@ func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6504,22 +6112,19 @@ func rewriteValueMIPS64_OpRsh16x16(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + 
v.AddArg2(v0, v1) return true } } @@ -6537,22 +6142,19 @@ func rewriteValueMIPS64_OpRsh16x32(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6570,18 +6172,15 @@ func rewriteValueMIPS64_OpRsh16x64(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v3.AddArg(v4) + v3.AddArg2(y, v4) v2.AddArg(v3) - v1.AddArg(v2) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) return true } } @@ -6599,22 +6198,19 @@ func rewriteValueMIPS64_OpRsh16x8(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6634,20 +6230,17 @@ func 
rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6667,20 +6260,17 @@ func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6700,16 +6290,13 @@ func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) return true } } @@ -6729,20 +6316,17 @@ func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - 
v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6760,22 +6344,19 @@ func rewriteValueMIPS64_OpRsh32x16(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6793,22 +6374,19 @@ func rewriteValueMIPS64_OpRsh32x32(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6826,18 +6404,15 @@ func rewriteValueMIPS64_OpRsh32x64(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := 
b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v3.AddArg(v4) + v3.AddArg2(y, v4) v2.AddArg(v3) - v1.AddArg(v2) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) return true } } @@ -6855,22 +6430,19 @@ func rewriteValueMIPS64_OpRsh32x8(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6890,18 +6462,15 @@ func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -6921,18 +6490,15 @@ func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - 
v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -6952,14 +6518,11 @@ func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -6979,18 +6542,15 @@ func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -7006,22 +6566,19 @@ func rewriteValueMIPS64_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v0.AddArg(v1) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) + v.AddArg2(x, v0) return true } } @@ -7037,22 +6594,19 @@ func rewriteValueMIPS64_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) v3.AddArg(y) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v0.AddArg(v1) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) + v.AddArg2(x, v0) return true } } @@ -7068,18 +6622,15 @@ func rewriteValueMIPS64_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 63 - v2.AddArg(v3) + v2.AddArg2(y, v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) + v.AddArg2(x, v0) return true } } @@ -7095,22 +6646,19 @@ func rewriteValueMIPS64_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v0.AddArg(v1) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) + v.AddArg2(x, v0) return true } } @@ -7130,20 +6678,17 @@ func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) 
- v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -7163,20 +6708,17 @@ func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -7196,16 +6738,13 @@ func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) return true } } @@ -7225,20 +6764,17 @@ func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -7256,22 +6792,19 @@ func rewriteValueMIPS64_OpRsh8x16(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 
:= b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -7289,22 +6822,19 @@ func rewriteValueMIPS64_OpRsh8x32(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -7322,18 +6852,15 @@ func rewriteValueMIPS64_OpRsh8x64(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v3.AddArg(v4) + v3.AddArg2(y, v4) v2.AddArg(v3) - v1.AddArg(v2) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) return true } } @@ -7351,27 +6878,41 @@ func rewriteValueMIPS64_OpRsh8x8(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := 
b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } func rewriteValueMIPS64_OpSelect0(v *Value) bool { v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select1 (MULVU x y)) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect1) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } // match: (Select0 (DIVVU _ (MOVVconst [1]))) // result: (MOVVconst [0]) for { @@ -7455,6 +6996,27 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { } func rewriteValueMIPS64_OpSelect1(v *Value) bool { v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPS64SGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = 0 + v.AddArg2(v0, v2) + return true + } // match: (Select1 (MULVU x (MOVVconst [-1]))) // result: (NEGV x) for { @@ -7508,9 +7070,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -7553,9 +7113,7 @@ func rewriteValueMIPS64_OpSelect1(v 
*Value) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Select1 (DIVVU x (MOVVconst [c]))) @@ -7680,9 +7238,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7697,9 +7253,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7714,9 +7268,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7731,9 +7283,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVVstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7748,9 +7298,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7765,9 +7313,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7785,9 +7331,7 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) @@ -7799,11 +7343,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpMIPS64MOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + 
v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) @@ -7820,11 +7362,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } v.reset(OpMIPS64MOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -7837,18 +7377,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 1 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) @@ -7865,11 +7401,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } v.reset(OpMIPS64MOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) @@ -7887,18 +7421,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) @@ -7911,32 +7441,24 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 3 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) 
v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) @@ -7953,11 +7475,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } v.reset(OpMIPS64MOVVstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] {t} ptr mem) @@ -7975,18 +7495,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) @@ -8004,32 +7520,24 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 6 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v3.AuxInt = 2 - v3.AddArg(ptr) v4 := 
b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [3] ptr mem) @@ -8042,25 +7550,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] {t} ptr mem) @@ -8078,25 +7580,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] {t} ptr mem) @@ -8114,25 +7610,19 @@ func 
rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] {t} ptr mem) @@ -8150,18 +7640,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [24] {t} ptr mem) @@ -8179,25 +7665,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 16 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) @@ -8213,8 +7693,7 @@ 
func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64DUFFZERO) v.AuxInt = 8 * (128 - s/8) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Zero [s] {t} ptr mem) @@ -8230,12 +7709,10 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64LoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -8248,8 +7725,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPF, cmp) return true } // match: (EQ (FPFlagFalse cmp) yes no) @@ -8257,8 +7733,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPT, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) @@ -8272,9 +7747,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGT { break } - _ = cmp.Args[1] - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -8288,9 +7761,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTU { break } - _ = cmp.Args[1] - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) @@ -8304,8 +7775,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTconst { break } - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -8319,8 +7789,7 @@ func 
rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTUconst { break } - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (SGTUconst [1] x) yes no) @@ -8331,8 +7800,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64NE) - b.AddControl(x) + b.resetWithControl(BlockMIPS64NE, x) return true } // match: (EQ (SGTU x (MOVVconst [0])) yes no) @@ -8345,8 +7813,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64EQ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64EQ, x) return true } // match: (EQ (SGTconst [0] x) yes no) @@ -8357,8 +7824,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64GEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64GEZ, x) return true } // match: (EQ (SGT x (MOVVconst [0])) yes no) @@ -8371,8 +7837,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64LEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64LEZ, x) return true } // match: (EQ (MOVVconst [0]) yes no) @@ -8455,8 +7920,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE cond yes no) for { cond := b.Controls[0] - b.Reset(BlockMIPS64NE) - b.AddControl(cond) + b.resetWithControl(BlockMIPS64NE, cond) return true } case BlockMIPS64LEZ: @@ -8517,8 +7981,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPT, cmp) return true } // match: (NE (FPFlagFalse cmp) yes no) @@ -8526,8 +7989,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPF, cmp) return true } // match: 
(NE (XORconst [1] cmp:(SGT _ _)) yes no) @@ -8541,9 +8003,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGT { break } - _ = cmp.Args[1] - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -8557,9 +8017,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTU { break } - _ = cmp.Args[1] - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) @@ -8573,8 +8031,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTconst { break } - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -8588,8 +8045,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTUconst { break } - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (SGTUconst [1] x) yes no) @@ -8600,8 +8056,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64EQ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64EQ, x) return true } // match: (NE (SGTU x (MOVVconst [0])) yes no) @@ -8614,8 +8069,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64NE) - b.AddControl(x) + b.resetWithControl(BlockMIPS64NE, x) return true } // match: (NE (SGTconst [0] x) yes no) @@ -8626,8 +8080,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64LTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64LTZ, x) return true } // match: (NE (SGT x (MOVVconst [0])) yes no) @@ -8640,8 +8093,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64GTZ) - b.AddControl(x) + 
b.resetWithControl(BlockMIPS64GTZ, x) return true } // match: (NE (MOVVconst [0]) yes no) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index c4eb25f38e..0094ba1b74 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -179,6 +179,9 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpCvt64to32F(v) case OpCvt64to64F: return rewriteValuePPC64_OpCvt64to64F(v) + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValuePPC64_OpDiv16(v) case OpDiv16u: @@ -441,8 +444,12 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ADD(v) case OpPPC64ADDconst: return rewriteValuePPC64_OpPPC64ADDconst(v) + case OpPPC64ADDconstForCarry: + return rewriteValuePPC64_OpPPC64ADDconstForCarry(v) case OpPPC64AND: return rewriteValuePPC64_OpPPC64AND(v) + case OpPPC64ANDN: + return rewriteValuePPC64_OpPPC64ANDN(v) case OpPPC64ANDconst: return rewriteValuePPC64_OpPPC64ANDconst(v) case OpPPC64CMP: @@ -581,6 +588,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MTVSRD(v) case OpPPC64MaskIfNotCarry: return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v) + case OpPPC64NOR: + return rewriteValuePPC64_OpPPC64NOR(v) case OpPPC64NotEqual: return rewriteValuePPC64_OpPPC64NotEqual(v) case OpPPC64OR: @@ -593,6 +602,18 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ROTL(v) case OpPPC64ROTLW: return rewriteValuePPC64_OpPPC64ROTLW(v) + case OpPPC64SLD: + return rewriteValuePPC64_OpPPC64SLD(v) + case OpPPC64SLW: + return rewriteValuePPC64_OpPPC64SLW(v) + case OpPPC64SRAD: + return rewriteValuePPC64_OpPPC64SRAD(v) + case OpPPC64SRAW: + return rewriteValuePPC64_OpPPC64SRAW(v) + case OpPPC64SRD: + return rewriteValuePPC64_OpPPC64SRD(v) + case OpPPC64SRW: + return rewriteValuePPC64_OpPPC64SRW(v) case OpPPC64SUB: return rewriteValuePPC64_OpPPC64SUB(v) case OpPPC64XOR: @@ -807,10 
+828,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool { mem := v_3 v.reset(OpPPC64LoweredAtomicCas32) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } } @@ -828,10 +846,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool { mem := v_3 v.reset(OpPPC64LoweredAtomicCas64) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } } @@ -849,10 +864,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v *Value) bool { mem := v_3 v.reset(OpPPC64LoweredAtomicCas32) v.AuxInt = 0 - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } } @@ -866,8 +878,7 @@ func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad32) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -881,8 +892,7 @@ func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad64) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -896,8 +906,7 @@ func rewriteValuePPC64_OpAtomicLoad8(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad8) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -911,8 +920,7 @@ func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad32) v.AuxInt = 0 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -926,8 +934,7 @@ func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoadPtr) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -943,9 +950,7 @@ func rewriteValuePPC64_OpAtomicStore32(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore32) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, 
val, mem) return true } } @@ -961,9 +966,7 @@ func rewriteValuePPC64_OpAtomicStore64(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore64) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -979,9 +982,7 @@ func rewriteValuePPC64_OpAtomicStore8(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore8) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -997,9 +998,7 @@ func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore32) v.AuxInt = 0 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -1017,11 +1016,9 @@ func rewriteValuePPC64_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpPPC64SUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1036,10 +1033,9 @@ func rewriteValuePPC64_OpBitLen32(v *Value) bool { v.reset(OpPPC64SUB) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1054,10 +1050,9 @@ func rewriteValuePPC64_OpBitLen64(v *Value) bool { v.reset(OpPPC64SUB) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1068,8 +1063,7 @@ func rewriteValuePPC64_OpCom16(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1080,8 +1074,7 @@ func rewriteValuePPC64_OpCom32(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1092,8 +1085,7 @@ func rewriteValuePPC64_OpCom64(v *Value) bool { for 
{ x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1104,8 +1096,7 @@ func rewriteValuePPC64_OpCom8(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1126,9 +1117,7 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = 2 - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } // match: (CondSelect x y bool) @@ -1143,12 +1132,10 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = 2 - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(bool) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } return false @@ -1171,8 +1158,7 @@ func rewriteValuePPC64_OpCopysign(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64FCPSGN) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1190,8 +1176,7 @@ func rewriteValuePPC64_OpCtz16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16) v2.AuxInt = -1 v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) + v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1215,8 +1200,7 @@ func rewriteValuePPC64_OpCtz32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int) v2.AuxInt = -1 v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) + v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1249,8 +1233,7 @@ func rewriteValuePPC64_OpCtz64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64) v1.AuxInt = -1 v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -1277,8 +1260,7 @@ func rewriteValuePPC64_OpCtz8(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8) v2.AuxInt = -1 v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) + v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1421,10 +1403,9 @@ func rewriteValuePPC64_OpDiv16(v *Value) bool { v.reset(OpPPC64DIVW) v0 
:= b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1441,10 +1422,9 @@ func rewriteValuePPC64_OpDiv16u(v *Value) bool { v.reset(OpPPC64DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1461,10 +1441,9 @@ func rewriteValuePPC64_OpDiv8(v *Value) bool { v.reset(OpPPC64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1481,10 +1460,9 @@ func rewriteValuePPC64_OpDiv8u(v *Value) bool { v.reset(OpPPC64DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1507,10 +1485,9 @@ func rewriteValuePPC64_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1525,10 +1502,9 @@ func rewriteValuePPC64_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1544,8 +1520,7 @@ func rewriteValuePPC64_OpEq32(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1561,8 +1536,7 @@ 
func rewriteValuePPC64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1578,8 +1552,7 @@ func rewriteValuePPC64_OpEq64(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1595,8 +1568,7 @@ func rewriteValuePPC64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1620,10 +1592,9 @@ func rewriteValuePPC64_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1638,10 +1609,9 @@ func rewriteValuePPC64_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1659,8 +1629,7 @@ func rewriteValuePPC64_OpEqB(v *Value) bool { v.reset(OpPPC64ANDconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1676,8 +1645,7 @@ func rewriteValuePPC64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1693,8 +1661,7 @@ func rewriteValuePPC64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1710,8 +1677,7 @@ func rewriteValuePPC64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1727,8 +1693,7 @@ func rewriteValuePPC64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1744,8 +1709,7 @@ func rewriteValuePPC64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1761,8 +1725,7 @@ func rewriteValuePPC64_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -1793,8 +1756,7 @@ func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -1813,10 +1775,9 @@ func rewriteValuePPC64_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1835,10 +1796,9 @@ func rewriteValuePPC64_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1854,8 +1814,7 @@ 
func rewriteValuePPC64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1871,8 +1830,7 @@ func rewriteValuePPC64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1888,8 +1846,7 @@ func rewriteValuePPC64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1905,8 +1862,7 @@ func rewriteValuePPC64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1922,8 +1878,7 @@ func rewriteValuePPC64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1939,8 +1894,7 @@ func rewriteValuePPC64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1959,10 +1913,9 @@ func rewriteValuePPC64_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1981,10 +1934,9 @@ func rewriteValuePPC64_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) 
v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2003,10 +1955,9 @@ func rewriteValuePPC64_OpLess16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2025,10 +1976,9 @@ func rewriteValuePPC64_OpLess16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2044,8 +1994,7 @@ func rewriteValuePPC64_OpLess32(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2061,8 +2010,7 @@ func rewriteValuePPC64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2078,8 +2026,7 @@ func rewriteValuePPC64_OpLess32U(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2095,8 +2042,7 @@ func rewriteValuePPC64_OpLess64(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2112,8 +2058,7 @@ func rewriteValuePPC64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2129,8 +2074,7 @@ func 
rewriteValuePPC64_OpLess64U(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2149,10 +2093,9 @@ func rewriteValuePPC64_OpLess8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2171,10 +2114,9 @@ func rewriteValuePPC64_OpLess8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2195,8 +2137,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2210,8 +2151,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2225,8 +2165,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVWZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2240,8 +2179,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2255,8 +2193,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVHZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2270,8 +2207,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVBZload) - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2286,8 +2222,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { } v.reset(OpPPC64MOVBreg) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -2302,8 +2237,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVBZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2317,8 +2251,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64FMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2332,8 +2265,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2366,8 +2298,7 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x16 x y) @@ -2376,9 +2307,7 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 @@ -2386,8 +2315,8 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2396,23 +2325,6 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh16x32 x (Const64 [c])) - // cond: uint32(c) < 16 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } 
// match: (Lsh16x32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SLWconst x [c]) @@ -2440,8 +2352,7 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x32 x y) @@ -2450,9 +2361,7 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 @@ -2460,8 +2369,8 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2470,28 +2379,11 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh16x64 _ (Const64 [c])) + // match: (Lsh16x64 _ (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -2529,8 +2421,7 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x64 x y) @@ -2539,16 +2430,14 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 v2.AddArg(y) v1.AddArg(v2) - 
v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2567,8 +2456,7 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x8 x y) @@ -2577,9 +2465,7 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 @@ -2587,8 +2473,8 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2607,8 +2493,7 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x16 x y) @@ -2617,9 +2502,7 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -2627,8 +2510,8 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2637,23 +2520,6 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh32x32 x (Const64 [c])) - // cond: uint32(c) < 32 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh32x32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: 
(SLWconst x [c]) @@ -2681,8 +2547,7 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x32 x y) @@ -2691,9 +2556,7 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -2701,8 +2564,8 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2711,28 +2574,11 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh32x64 _ (Const64 [c])) + // match: (Lsh32x64 _ (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -2770,8 +2616,7 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x64 x (AND y (MOVDconst [31]))) @@ -2790,11 +2635,10 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { continue } v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -2808,11 +2652,10 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, 
OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x64 x y) @@ -2821,16 +2664,14 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2849,8 +2690,7 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x8 x y) @@ -2859,9 +2699,7 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -2869,8 +2707,8 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2889,8 +2727,7 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x16 x y) @@ -2899,9 +2736,7 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -2909,8 +2744,8 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } 
} @@ -2919,23 +2754,6 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh64x32 x (Const64 [c])) - // cond: uint32(c) < 64 - // result: (SLDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh64x32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SLDconst x [c]) @@ -2963,8 +2781,7 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x32 x y) @@ -2973,9 +2790,7 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -2983,8 +2798,8 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2993,28 +2808,11 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh64x64 x (Const64 [c])) - // cond: uint64(c) < 64 - // result: (SLDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpPPC64SLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh64x64 _ (Const64 [c])) + // match: (Lsh64x64 _ (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -3052,8 +2850,7 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } // match: (Lsh64x64 x (AND y (MOVDconst [63]))) @@ -3072,11 +2869,10 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { continue } v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -3090,11 +2886,10 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x64 x y) @@ -3103,16 +2898,14 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3131,8 +2924,7 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x8 x y) @@ -3141,9 +2933,7 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -3151,8 +2941,8 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3171,8 +2961,7 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x16 x y) @@ -3181,9 +2970,7 @@ func 
rewriteValuePPC64_OpLsh8x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 @@ -3191,8 +2978,8 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3201,23 +2988,6 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh8x32 x (Const64 [c])) - // cond: uint32(c) < 8 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh8x32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SLWconst x [c]) @@ -3245,8 +3015,7 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x32 x y) @@ -3255,9 +3024,7 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 @@ -3265,8 +3032,8 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3275,28 +3042,11 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := 
v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh8x64 _ (Const64 [c])) + // match: (Lsh8x64 _ (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -3334,8 +3084,7 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x64 x y) @@ -3344,16 +3093,14 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3372,8 +3119,7 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x8 x y) @@ -3382,9 +3128,7 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 @@ -3392,8 +3136,8 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3410,10 +3154,9 @@ func rewriteValuePPC64_OpMod16(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3430,10 +3173,9 @@ func 
rewriteValuePPC64_OpMod16u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3448,14 +3190,11 @@ func rewriteValuePPC64_OpMod32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3470,14 +3209,11 @@ func rewriteValuePPC64_OpMod32u(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3492,14 +3228,11 @@ func rewriteValuePPC64_OpMod64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3514,14 +3247,11 @@ func rewriteValuePPC64_OpMod64u(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3538,10 +3268,9 @@ func rewriteValuePPC64_OpMod8(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - 
v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3558,10 +3287,9 @@ func rewriteValuePPC64_OpMod8u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3578,9 +3306,7 @@ func rewriteValuePPC64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -3593,12 +3319,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpPPC64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -3611,12 +3334,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpPPC64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -3629,12 +3349,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpPPC64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] {t} dst src mem) @@ -3652,12 +3369,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { break } v.reset(OpPPC64MOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -3671,20 +3385,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { 
mem := v_2 v.reset(OpPPC64MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -3698,20 +3406,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -3725,20 +3427,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -3752,20 +3448,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - 
v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -3779,29 +3469,20 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -3817,9 +3498,7 @@ func rewriteValuePPC64_OpMove(v *Value) bool { } v.reset(OpPPC64LoweredMove) v.AuxInt = s - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } return false @@ -3843,10 +3522,9 @@ func rewriteValuePPC64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3861,10 +3539,9 @@ func rewriteValuePPC64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, 
typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3880,8 +3557,7 @@ func rewriteValuePPC64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3897,8 +3573,7 @@ func rewriteValuePPC64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3914,8 +3589,7 @@ func rewriteValuePPC64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3931,8 +3605,7 @@ func rewriteValuePPC64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3956,10 +3629,9 @@ func rewriteValuePPC64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3974,10 +3646,9 @@ func rewriteValuePPC64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3993,8 +3664,7 @@ func rewriteValuePPC64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
v.AddArg(v0) return true } @@ -4023,8 +3693,7 @@ func rewriteValuePPC64_OpOffPtr(v *Value) bool { v.reset(OpPPC64ADD) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -4116,8 +3785,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4157,8 +3825,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4212,9 +3879,7 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVDaddr [d] {sym} x)) @@ -4235,6 +3900,40 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64ADDconstForCarry(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDconstForCarry [c] (MOVDconst [d])) + // cond: int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c)) + d >= 0) + // result: (FlagCarryClear) + for { + c := v.AuxInt + if v_0.Op != OpPPC64MOVDconst { + break + } + d := v_0.AuxInt + if !(int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c))+d >= 0)) { + break + } + v.reset(OpPPC64FlagCarryClear) + return true + } + // match: (ADDconstForCarry [c] (MOVDconst [d])) + // cond: int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c)) + d < 0 + // result: (FlagCarrySet) + for { + c := v.AuxInt + if v_0.Op != OpPPC64MOVDconst { + break + } + d := v_0.AuxInt + if !(int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c))+d < 0) { + break + } + v.reset(OpPPC64FlagCarrySet) + return true + } + return false +} func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -4251,8 +3950,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { continue } v.reset(OpPPC64ANDN) - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4308,9 +4006,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -4345,7 +4041,6 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if x.Op != OpPPC64MOVBZload { continue } - _ = x.Args[1] v.reset(OpPPC64ANDconst) v.AuxInt = c & 0xFF v.AddArg(x) @@ -4355,6 +4050,26 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c&^d]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + if v_1.Op != OpPPC64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c &^ d + return true + } + return false +} func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDconst [c] (ANDconst [d] x)) @@ -4378,9 +4093,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [0] _) @@ -4402,9 +4115,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [0xFF] y:(MOVBreg _)) @@ -4417,9 +4128,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [c] y:(MOVHZreg _)) @@ -4431,9 +4140,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: 
(ANDconst [0xFFFF] y:(MOVHreg _)) @@ -4446,9 +4153,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVHreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [c] (MOVBreg x)) @@ -4582,8 +4287,7 @@ func rewriteValuePPC64_OpPPC64CMP(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4640,8 +4344,7 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4709,8 +4412,7 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64CMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW (MOVWreg x) y) @@ -4722,8 +4424,7 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpPPC64CMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW x (MOVDconst [c])) @@ -4773,8 +4474,7 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4793,8 +4493,7 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64CMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU (MOVWZreg x) y) @@ -4806,8 +4505,7 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpPPC64CMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU x (MOVDconst [c])) @@ -4857,8 +4555,7 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) - v0.AddArg(y) - 
v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -5064,8 +4761,7 @@ func rewriteValuePPC64_OpPPC64Equal(v *Value) bool { v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5098,9 +4794,7 @@ func rewriteValuePPC64_OpPPC64FADD(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -5121,9 +4815,7 @@ func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -5172,15 +4864,12 @@ func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value) bool { v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32) v1.AuxInt = 1 v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = 1 - v1.AddArg(v2) - v1.AddArg(cmp) - v.AddArg(v1) - v.AddArg(cmp) + v1.AddArg2(v2, cmp) + v.AddArg3(v0, v1, cmp) return true } } @@ -5196,8 +4885,7 @@ func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value) bool { v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5213,15 +4901,12 @@ func rewriteValuePPC64_OpPPC64FLessEqual(v *Value) bool { v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32) v1.AuxInt = 0 v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = 1 - v1.AddArg(v2) - v1.AddArg(cmp) - v.AddArg(v1) - v.AddArg(cmp) + v1.AddArg2(v2, cmp) + v.AddArg3(v0, v1, cmp) return true } } @@ -5237,8 +4922,7 @@ func rewriteValuePPC64_OpPPC64FLessThan(v *Value) bool { v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + 
v.AddArg2(v0, cmp) return true } } @@ -5254,11 +4938,10 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { if v_1.Op != OpPPC64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + x := v_1.Args[1] if ptr != v_1.Args[0] { break } - x := v_1.Args[1] v.reset(OpPPC64MTVSRD) v.AddArg(x) return true @@ -5283,8 +4966,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { v.reset(OpPPC64FMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -5305,8 +4987,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { v.reset(OpPPC64FMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5329,9 +5010,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -5353,9 +5032,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { v.reset(OpPPC64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -5379,9 +5056,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { v.reset(OpPPC64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -5409,8 +5084,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool { v.reset(OpPPC64FMOVSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -5431,8 +5105,7 @@ 
func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool { v.reset(OpPPC64FMOVSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5460,9 +5133,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool { v.reset(OpPPC64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -5486,9 +5157,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool { v.reset(OpPPC64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -5522,12 +5191,16 @@ func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool { func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool { v_0 := v.Args[0] // match: (FSQRT (FMOVDconst [x])) + // cond: auxTo64F(x) >= 0 // result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))]) for { if v_0.Op != OpPPC64FMOVDconst { break } x := v_0.AuxInt + if !(auxTo64F(x) >= 0) { + break + } v.reset(OpPPC64FMOVDconst) v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x))) return true @@ -5547,9 +5220,7 @@ func rewriteValuePPC64_OpPPC64FSUB(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMSUB) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -5567,9 +5238,7 @@ func rewriteValuePPC64_OpPPC64FSUBS(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMSUBS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -5642,8 +5311,7 @@ func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool { v.AuxInt = 4 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5700,8 +5368,7 @@ func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool { v.AuxInt = 1 v0 := b.NewValue0(v.Pos, 
OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5719,9 +5386,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [2] _ y (FlagLT)) @@ -5734,9 +5399,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [2] _ y (FlagGT)) @@ -5749,9 +5412,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [6] _ y (FlagEQ)) @@ -5764,9 +5425,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [6] x _ (FlagLT)) @@ -5779,9 +5438,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [6] x _ (FlagGT)) @@ -5794,9 +5451,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [0] _ y (FlagEQ)) @@ -5809,9 +5464,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [0] _ y (FlagGT)) @@ -5824,9 +5477,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [0] x _ (FlagLT)) @@ -5839,9 +5490,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { 
break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [5] _ x (FlagEQ)) @@ -5854,9 +5503,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [5] _ x (FlagLT)) @@ -5869,9 +5516,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [5] y _ (FlagGT)) @@ -5884,9 +5529,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [1] _ y (FlagEQ)) @@ -5899,9 +5542,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [1] _ y (FlagLT)) @@ -5914,9 +5555,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [1] x _ (FlagGT)) @@ -5929,9 +5568,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [4] x _ (FlagEQ)) @@ -5944,9 +5581,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [4] x _ (FlagGT)) @@ -5959,9 +5594,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [4] _ y (FlagLT)) @@ -5974,9 +5607,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != 
OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [n] x y (InvertFlags bool)) @@ -5995,9 +5626,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = n + 1 - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } // match: (ISEL [n] x y (InvertFlags bool)) @@ -6016,9 +5645,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = n - 1 - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } // match: (ISEL [n] x y (InvertFlags bool)) @@ -6037,9 +5664,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = n - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } return false @@ -6245,8 +5870,7 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = n + 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(bool) + v.AddArg2(v0, bool) return true } // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) @@ -6265,8 +5889,7 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = n - 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(bool) + v.AddArg2(v0, bool) return true } // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) @@ -6285,8 +5908,7 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = n v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(bool) + v.AddArg2(v0, bool) return true } return false @@ -6344,8 +5966,7 @@ func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool { v.AuxInt = 5 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -6402,8 +6023,7 @@ func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool { v.AuxInt = 0 v0 := b.NewValue0(v.Pos, 
OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -6439,12 +6059,10 @@ func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -6472,8 +6090,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { v.reset(OpPPC64MOVBZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) @@ -6494,8 +6111,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { v.reset(OpPPC64MOVBZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem) @@ -6517,9 +6133,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { break } v.reset(OpPPC64MOVBZloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6543,8 +6157,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool { } v.reset(OpPPC64MOVBZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBZloadidx (MOVDconst [c]) ptr mem) @@ -6562,8 +6175,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool { } v.reset(OpPPC64MOVBZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6584,9 +6196,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if !(uint64(c) <= 0xFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBZreg (SRWconst [c] (MOVBZreg x))) @@ -6666,9 +6276,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if y.Op != OpPPC64MOVBZreg { break } - v.reset(OpCopy) - v.Type 
= y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBZreg (MOVBreg x)) @@ -6689,10 +6297,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if x.Op != OpPPC64MOVBZload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) @@ -6702,10 +6307,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if x.Op != OpPPC64MOVBZloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(Arg )) @@ -6720,9 +6322,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if !(is8BitInt(t) && !isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg (MOVDconst [c])) @@ -6754,9 +6354,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { if !(uint64(c) <= 0x7F) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBreg (SRAWconst [c] (MOVBreg x))) @@ -6870,9 +6468,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBreg (MOVBZreg x)) @@ -6898,9 +6494,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { if !(is8BitInt(t) && isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg (MOVDconst [c])) @@ -6942,9 +6536,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -6968,9 +6560,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -6986,8 +6576,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} p:(ADD ptr idx) val mem) @@ -7008,10 +6597,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { break } v.reset(OpPPC64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -7028,9 +6614,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) @@ -7047,9 +6631,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -7066,9 +6648,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem) @@ -7085,9 +6665,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -7104,9 +6682,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore 
[off] {sym} ptr (MOVWZreg x) mem) @@ -7123,9 +6699,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem) @@ -7151,12 +6725,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem) @@ -7182,12 +6754,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWreg x) [c]) mem) @@ -7213,12 +6783,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWZreg x) [c]) mem) @@ -7244,12 +6812,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [i1] {s} p (SRWconst w [24]) x0:(MOVBstore [i0] {s} p (SRWconst w [16]) mem)) @@ -7282,12 +6848,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) v0 := 
b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16) v0.AuxInt = 16 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i1] {s} p (SRDconst w [24]) x0:(MOVBstore [i0] {s} p (SRDconst w [16]) mem)) @@ -7320,12 +6884,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16) v0.AuxInt = 16 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i1] {s} p (SRWconst w [8]) x0:(MOVBstore [i0] {s} p w mem)) @@ -7354,9 +6916,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i1] {s} p (SRDconst w [8]) x0:(MOVBstore [i0] {s} p w mem)) @@ -7385,9 +6945,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i3] {s} p w x0:(MOVBstore [i2] {s} p (SRWconst w [8]) x1:(MOVBstore [i1] {s} p (SRWconst w [16]) x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem)))) @@ -7451,9 +7009,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(v0, w, mem) return true } // match: (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem)) @@ -7485,9 +7041,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(v0, w, mem) return true } // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem))))) @@ 
-7564,9 +7118,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i7] {s} p w x0:(MOVBstore [i6] {s} p (SRDconst w [8]) x1:(MOVBstore [i5] {s} p (SRDconst w [16]) x2:(MOVBstore [i4] {s} p (SRDconst w [24]) x3:(MOVBstore [i3] {s} p (SRDconst w [32]) x4:(MOVBstore [i2] {s} p (SRDconst w [40]) x5:(MOVBstore [i1] {s} p (SRDconst w [48]) x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem)))))))) @@ -7694,9 +7246,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(v0, w, mem) return true } return false @@ -7724,9 +7274,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { } v.reset(OpPPC64MOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem) @@ -7745,9 +7293,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { } v.reset(OpPPC64MOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBreg x) mem) @@ -7765,10 +7311,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBZreg x) mem) @@ -7786,10 +7329,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHreg x) mem) @@ -7807,10 +7347,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool 
{ v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHZreg x) mem) @@ -7828,10 +7365,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWreg x) mem) @@ -7849,10 +7383,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) @@ -7870,10 +7401,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHreg x) [c]) mem) @@ -7900,13 +7428,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHZreg x) [c]) mem) @@ -7933,13 +7458,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWreg x) [c]) mem) @@ -7966,13 +7488,10 @@ func 
rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWZreg x) [c]) mem) @@ -7999,13 +7518,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } return false @@ -8031,8 +7547,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool { v.reset(OpPPC64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -8055,8 +7570,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool { v.reset(OpPPC64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -8073,11 +7587,10 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { if v_1.Op != OpPPC64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + x := v_1.Args[1] if ptr != v_1.Args[0] { break } - x := v_1.Args[1] v.reset(OpPPC64MFVSRD) v.AddArg(x) return true @@ -8102,8 +7615,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { v.reset(OpPPC64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem) @@ -8124,8 +7636,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { v.reset(OpPPC64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + 
v.AddArg2(x, mem) return true } // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem) @@ -8147,9 +7658,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { break } v.reset(OpPPC64MOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8173,8 +7682,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool { } v.reset(OpPPC64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx (MOVDconst [c]) ptr mem) @@ -8192,8 +7700,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool { } v.reset(OpPPC64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8216,9 +7723,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64FMOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) @@ -8240,9 +7745,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -8266,9 +7769,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -8284,8 +7785,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstore [off] {sym} p:(ADD ptr idx) val mem) @@ -8306,10 +7806,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { break } 
v.reset(OpPPC64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -8335,9 +7832,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool { } v.reset(OpPPC64MOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem) @@ -8356,9 +7851,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool { } v.reset(OpPPC64MOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -8384,8 +7877,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { v.reset(OpPPC64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -8408,8 +7900,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { v.reset(OpPPC64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -8430,9 +7921,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHBRstore {sym} ptr (MOVHZreg x) mem) @@ -8447,9 +7936,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHBRstore {sym} ptr (MOVWreg x) mem) @@ -8464,9 +7951,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHBRstore {sym} ptr (MOVWZreg x) mem) @@ -8481,9 +7966,7 @@ func 
rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -8511,8 +7994,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { v.reset(OpPPC64MOVHZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) @@ -8533,8 +8015,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { v.reset(OpPPC64MOVHZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem) @@ -8556,9 +8037,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { break } v.reset(OpPPC64MOVHZloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8582,8 +8061,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool { } v.reset(OpPPC64MOVHZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHZloadidx (MOVDconst [c]) ptr mem) @@ -8601,8 +8079,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool { } v.reset(OpPPC64MOVHZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8623,9 +8100,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if !(uint64(c) <= 0xFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg (SRWconst [c] (MOVBZreg x))) @@ -8724,9 +8199,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if y.Op != OpPPC64MOVHZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg y:(MOVBZreg _)) @@ -8736,9 +8209,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if y.Op != OpPPC64MOVBZreg { break } - 
v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg y:(MOVHBRload _ _)) @@ -8748,10 +8219,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if y.Op != OpPPC64MOVHBRload { break } - _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg y:(MOVHreg x)) @@ -8773,10 +8241,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVBZload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) @@ -8786,10 +8251,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVBZloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZload _ _)) @@ -8799,10 +8261,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVHZload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) @@ -8812,10 +8271,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVHZloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(Arg )) @@ -8830,9 +8286,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg (MOVDconst [c])) @@ -8871,8 +8325,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { v.reset(OpPPC64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem) @@ -8893,8 +8346,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { 
v.reset(OpPPC64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem) @@ -8916,9 +8368,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { break } v.reset(OpPPC64MOVHloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8942,8 +8392,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool { } v.reset(OpPPC64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVDconst [c]) ptr mem) @@ -8961,8 +8410,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool { } v.reset(OpPPC64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8983,9 +8431,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if !(uint64(c) <= 0x7FFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHreg (SRAWconst [c] (MOVBreg x))) @@ -9118,9 +8564,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if y.Op != OpPPC64MOVHreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHreg y:(MOVBreg _)) @@ -9130,9 +8574,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHreg y:(MOVHZreg x)) @@ -9154,10 +8596,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if x.Op != OpPPC64MOVHload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHloadidx _ _ _)) @@ -9167,10 +8606,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if x.Op != OpPPC64MOVHloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: 
(MOVHreg x:(Arg )) @@ -9185,9 +8621,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg (MOVDconst [c])) @@ -9228,9 +8662,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -9254,9 +8686,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -9272,8 +8702,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} p:(ADD ptr idx) val mem) @@ -9294,10 +8723,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { break } v.reset(OpPPC64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -9314,9 +8740,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) @@ -9333,9 +8757,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -9352,9 +8774,7 @@ func 
rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem) @@ -9371,9 +8791,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [i1] {s} p (SRWconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) @@ -9402,9 +8820,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHstore [i1] {s} p (SRDconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) @@ -9433,9 +8849,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -9461,9 +8875,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { } v.reset(OpPPC64MOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem) @@ -9482,9 +8894,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { } v.reset(OpPPC64MOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHreg x) mem) @@ -9502,10 +8912,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHZreg x) mem) @@ -9523,10 +8930,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { 
v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWreg x) mem) @@ -9544,10 +8948,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) @@ -9565,10 +8966,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -9594,8 +8992,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool { v.reset(OpPPC64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -9618,8 +9015,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool { v.reset(OpPPC64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -9640,9 +9036,7 @@ func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVWBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem) @@ -9657,9 +9051,7 @@ func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVWBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -9687,8 +9079,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { v.reset(OpPPC64MOVWZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - 
v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) @@ -9709,8 +9100,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { v.reset(OpPPC64MOVWZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem) @@ -9732,9 +9122,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { break } v.reset(OpPPC64MOVWZloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -9758,8 +9146,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool { } v.reset(OpPPC64MOVWZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWZloadidx (MOVDconst [c]) ptr mem) @@ -9777,8 +9164,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool { } v.reset(OpPPC64MOVWZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -9799,9 +9185,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if !(uint64(c) <= 0xFFFFFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(AND (MOVDconst [c]) _)) @@ -9812,7 +9196,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64AND { break } - _ = y.Args[1] y_0 := y.Args[0] y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { @@ -9823,9 +9206,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if !(uint64(c) <= 0xFFFFFFFF) { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -9928,9 +9309,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVWZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVHZreg _)) @@ -9940,9 +9319,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { 
if y.Op != OpPPC64MOVHZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVBZreg _)) @@ -9952,9 +9329,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVBZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVHBRload _ _)) @@ -9964,10 +9339,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVHBRload { break } - _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVWBRload _ _)) @@ -9977,10 +9349,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVWBRload { break } - _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVWreg x)) @@ -10002,10 +9371,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVBZload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) @@ -10015,10 +9381,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVBZloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZload _ _)) @@ -10028,10 +9391,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVHZload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) @@ -10041,10 +9401,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVHZloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZload _ _)) @@ -10054,10 +9411,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != 
OpPPC64MOVWZload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) @@ -10067,10 +9421,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVWZloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(Arg )) @@ -10085,9 +9436,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg (MOVDconst [c])) @@ -10126,8 +9475,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { v.reset(OpPPC64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem) @@ -10148,8 +9496,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { v.reset(OpPPC64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem) @@ -10171,9 +9518,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { break } v.reset(OpPPC64MOVWloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10197,8 +9542,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool { } v.reset(OpPPC64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx (MOVDconst [c]) ptr mem) @@ -10216,8 +9560,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool { } v.reset(OpPPC64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10238,9 +9581,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if !(uint64(c) <= 0xFFFF) { break } - 
v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(AND (MOVDconst [c]) _)) @@ -10251,7 +9592,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64AND { break } - _ = y.Args[1] y_0 := y.Args[0] y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { @@ -10262,9 +9602,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if !(uint64(c) <= 0x7FFFFFFF) { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -10384,9 +9722,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64MOVWreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(MOVHreg _)) @@ -10396,9 +9732,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64MOVHreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(MOVBreg _)) @@ -10408,9 +9742,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(MOVWZreg x)) @@ -10432,10 +9764,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVHload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHloadidx _ _ _)) @@ -10445,10 +9774,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVHloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWload _ _)) @@ -10458,10 +9784,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVWload { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWloadidx _ _ _)) @@ -10471,10 +9794,7 
@@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVWloadidx { break } - _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(Arg )) @@ -10489,9 +9809,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg (MOVDconst [c])) @@ -10530,9 +9848,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -10556,9 +9872,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -10574,8 +9888,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} p:(ADD ptr idx) val mem) @@ -10596,10 +9909,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { break } v.reset(OpPPC64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -10616,9 +9926,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) @@ -10635,9 +9943,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { 
v.reset(OpPPC64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -10663,9 +9969,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { } v.reset(OpPPC64MOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem) @@ -10684,9 +9988,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { } v.reset(OpPPC64MOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWreg x) mem) @@ -10704,10 +10006,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { v.reset(OpPPC64MOVWstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) @@ -10725,10 +10024,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { v.reset(OpPPC64MOVWstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -10754,8 +10050,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool { v.reset(OpPPC64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -10778,8 +10073,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool { v.reset(OpPPC64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -10816,12 +10110,10 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt 
= off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -10848,6 +10140,49 @@ func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value) bool { v.AuxInt = -1 return true } + // match: (MaskIfNotCarry (FlagCarrySet)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagCarrySet { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MaskIfNotCarry (FlagCarryClear)) + // result: (MOVDconst [-1]) + for { + if v_0.Op != OpPPC64FlagCarryClear { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = -1 + return true + } + return false +} +func rewriteValuePPC64_OpPPC64NOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [^(c|d)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = ^(c | d) + return true + } + break + } return false } func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool { @@ -10903,8 +10238,7 @@ func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool { v.AuxInt = 6 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -10997,8 +10331,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -11038,8 +10371,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -11114,12 +10446,10 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, 
mem) return true } break @@ -11156,12 +10486,10 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -11198,14 +10526,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -11242,14 +10568,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -11292,16 +10616,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = n1 v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -11345,16 +10667,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = n1 v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -11416,12 +10736,10 @@ func 
rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -11483,12 +10801,10 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -11551,14 +10867,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11621,14 +10935,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11691,14 +11003,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11761,14 +11071,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - 
v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11835,16 +11143,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 32 v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -11912,16 +11218,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 32 v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -12032,12 +11336,10 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x4, x5, x6, x7) v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -12150,14 +11452,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -12270,14 +11570,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x3, x4, x5, x6, x7) v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } 
@@ -12390,14 +11688,12 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x3, x4, x5, x6, x7) v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -12417,9 +11713,22 @@ func rewriteValuePPC64_OpPPC64ORN(v *Value) bool { if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != -1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) + return true + } + // match: (ORN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c|^d]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + if v_1.Op != OpPPC64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c | ^d return true } return false @@ -12457,9 +11766,7 @@ func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12500,6 +11807,114 @@ func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64SLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLD x (MOVDconst [c])) + // result: (SLDconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SLDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLW x (MOVDconst [c])) + // result: (SLWconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAD x (MOVDconst [c])) + // result: (SRADconst [c] x) + for 
{ + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRADconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRAW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAW x (MOVDconst [c])) + // result: (SRAWconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRD x (MOVDconst [c])) + // result: (SRDconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRW x (MOVDconst [c])) + // result: (SRWconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValuePPC64_OpPPC64SUB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -12610,8 +12025,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12651,8 +12065,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12720,9 +12133,7 @@ func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12744,9 +12155,7 @@ func rewriteValuePPC64_OpPanicBounds(v *Value) bool { } v.reset(OpPPC64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } 
// match: (PanicBounds [kind] x y mem) @@ -12762,9 +12171,7 @@ func rewriteValuePPC64_OpPanicBounds(v *Value) bool { } v.reset(OpPPC64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -12780,9 +12187,7 @@ func rewriteValuePPC64_OpPanicBounds(v *Value) bool { } v.reset(OpPPC64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -12848,17 +12253,14 @@ func rewriteValuePPC64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -12885,8 +12287,7 @@ func rewriteValuePPC64_OpRotateLeft32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -12912,8 +12313,7 @@ func rewriteValuePPC64_OpRotateLeft64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -12933,17 +12333,14 @@ func rewriteValuePPC64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -12965,8 +12362,7 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { 
v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux16 x y) @@ -12977,9 +12373,7 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -12987,8 +12381,8 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -12997,25 +12391,6 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16Ux32 x (Const64 [c])) - // cond: uint32(c) < 16 - // result: (SRWconst (ZeroExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16Ux32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) @@ -13047,8 +12422,7 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux32 x y) @@ -13059,9 +12433,7 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) 
v3.AuxInt = -16 @@ -13069,8 +12441,8 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13079,30 +12451,11 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16Ux64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SRWconst (ZeroExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh16Ux64 _ (Const64 [c])) + // match: (Rsh16Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -13144,8 +12497,7 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux64 x y) @@ -13156,16 +12508,14 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13186,8 +12536,7 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux8 x y) @@ -13198,9 +12547,7 @@ func 
rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13208,8 +12555,8 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13230,8 +12577,7 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x16 x y) @@ -13242,9 +12588,7 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13252,8 +12596,8 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13262,25 +12606,6 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16x32 x (Const64 [c])) - // cond: uint32(c) < 16 - // result: (SRAWconst (SignExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16x32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SRAWconst (SignExt16to32 x) 
[c]) @@ -13312,8 +12637,7 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x32 x y) @@ -13324,9 +12648,7 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13334,8 +12656,8 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13344,31 +12666,12 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SRAWconst (SignExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh16x64 x (Const64 [c])) + // match: (Rsh16x64 x (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (SRAWconst (SignExt16to32 x) [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -13413,8 +12716,7 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x64 x y) @@ -13425,16 +12727,14 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) 
v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13455,8 +12755,7 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x8 x y) @@ -13467,9 +12766,7 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13477,8 +12774,8 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13497,8 +12794,7 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux16 x y) @@ -13507,9 +12803,7 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13517,8 +12811,8 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13527,23 +12821,6 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { v_0 := v.Args[0] b 
:= v.Block typ := &b.Func.Config.Types - // match: (Rsh32Ux32 x (Const64 [c])) - // cond: uint32(c) < 32 - // result: (SRWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh32Ux32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SRWconst x [c]) @@ -13571,8 +12848,7 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux32 x y) @@ -13581,9 +12857,7 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13591,8 +12865,8 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13601,28 +12875,11 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32Ux64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SRWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh32Ux64 _ (Const64 [c])) + // match: (Rsh32Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -13660,8 +12917,7 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) @@ 
-13680,11 +12936,10 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { continue } v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -13698,11 +12953,10 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) @@ -13723,16 +12977,14 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) @@ -13760,16 +13012,14 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { continue } v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -13780,16 +13030,14 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13808,8 +13056,7 @@ func 
rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux8 x y) @@ -13818,9 +13065,7 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13828,8 +13073,8 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13848,8 +13093,7 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x16 x y) @@ -13858,9 +13102,7 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13868,8 +13110,8 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13878,23 +13120,6 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32x32 x (Const64 [c])) - // cond: uint32(c) < 32 - // result: (SRAWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh32x32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SRAWconst x [c]) @@ -13922,8 +13147,7 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool 
{ break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x32 x y) @@ -13932,9 +13156,7 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13942,8 +13164,8 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13952,29 +13174,12 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SRAWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh32x64 x (Const64 [c])) + // match: (Rsh32x64 x (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (SRAWconst x [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -14013,8 +13218,7 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x64 x (AND y (MOVDconst [31]))) @@ -14033,11 +13237,10 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { continue } v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -14051,11 +13254,10 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 31 v0.AddArg(y) - 
v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) @@ -14076,16 +13278,14 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) @@ -14113,16 +13313,14 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { continue } v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -14133,16 +13331,14 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14161,8 +13357,7 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x8 x y) @@ -14171,9 +13366,7 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = 
-32 @@ -14181,8 +13374,8 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14201,8 +13394,7 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux16 x y) @@ -14211,9 +13403,7 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14221,8 +13411,8 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14231,23 +13421,6 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64Ux32 x (Const64 [c])) - // cond: uint32(c) < 64 - // result: (SRDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SRDconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh64Ux32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SRDconst x [c]) @@ -14275,8 +13448,7 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux32 x y) @@ -14285,9 +13457,7 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14295,8 +13465,8 @@ func 
rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14305,28 +13475,11 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64Ux64 x (Const64 [c])) - // cond: uint64(c) < 64 - // result: (SRDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpPPC64SRDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh64Ux64 _ (Const64 [c])) + // match: (Rsh64Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -14364,8 +13517,7 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) @@ -14384,11 +13536,10 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { continue } v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -14402,11 +13553,10 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) @@ -14427,16 +13577,14 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + 
v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) @@ -14464,16 +13612,14 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { continue } v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -14484,16 +13630,14 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14512,8 +13656,7 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux8 x y) @@ -14522,9 +13665,7 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14532,8 +13673,8 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14552,8 +13693,7 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x16 x y) @@ -14562,9 +13702,7 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { x := v_0 y := 
v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14572,8 +13710,8 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14582,23 +13720,6 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64x32 x (Const64 [c])) - // cond: uint32(c) < 64 - // result: (SRADconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SRADconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh64x32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SRADconst x [c]) @@ -14626,8 +13747,7 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x32 x y) @@ -14636,9 +13756,7 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14646,8 +13764,8 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14656,29 +13774,12 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64x64 x (Const64 [c])) - // cond: uint64(c) < 64 - // result: (SRADconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if 
!(uint64(c) < 64) { - break - } - v.reset(OpPPC64SRADconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh64x64 x (Const64 [c])) + // match: (Rsh64x64 x (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (SRADconst x [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -14717,8 +13818,7 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x64 x (AND y (MOVDconst [63]))) @@ -14737,11 +13837,10 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { continue } v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -14755,11 +13854,10 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) @@ -14780,16 +13878,14 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) @@ -14817,16 +13913,14 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { continue } v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - 
v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -14837,16 +13931,14 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14865,8 +13957,7 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x8 x y) @@ -14875,9 +13966,7 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14885,8 +13974,8 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14907,8 +13996,7 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux16 x y) @@ -14919,9 +14007,7 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -14929,8 +14015,8 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - 
v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -14939,25 +14025,6 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8Ux32 x (Const64 [c])) - // cond: uint32(c) < 8 - // result: (SRWconst (ZeroExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8Ux32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) @@ -14989,8 +14056,7 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux32 x y) @@ -15001,9 +14067,7 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15011,8 +14075,8 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15021,30 +14085,11 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8Ux64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRWconst (ZeroExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - 
v.AddArg(v0) - return true - } - // match: (Rsh8Ux64 _ (Const64 [c])) + // match: (Rsh8Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -15086,8 +14131,7 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux64 x y) @@ -15098,16 +14142,14 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15128,8 +14170,7 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux8 x y) @@ -15140,9 +14181,7 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15150,8 +14189,8 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15172,8 +14211,7 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) 
- v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x16 x y) @@ -15184,9 +14222,7 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15194,8 +14230,8 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15204,25 +14240,6 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8x32 x (Const64 [c])) - // cond: uint32(c) < 8 - // result: (SRAWconst (SignExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8x32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) @@ -15254,8 +14271,7 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x32 x y) @@ -15266,9 +14282,7 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15276,8 +14290,8 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - 
v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15286,31 +14300,12 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRAWconst (SignExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh8x64 x (Const64 [c])) + // match: (Rsh8x64 x (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (SRAWconst (SignExt8to32 x) [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -15355,8 +14350,7 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x64 x y) @@ -15367,16 +14361,14 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15397,8 +14389,7 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x8 x y) @@ -15409,9 +14400,7 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15419,8 +14408,8 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15456,9 +14445,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15473,9 +14460,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15490,9 +14475,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64FMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15507,9 +14490,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15524,9 +14505,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15541,9 +14520,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15558,9 +14535,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -15720,9 
+14695,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) @@ -15734,8 +14707,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpPPC64MOVBstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -15747,8 +14719,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpPPC64MOVHstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -15761,11 +14732,9 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 2 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem) - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [4] destptr mem) @@ -15777,8 +14746,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpPPC64MOVWstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [5] destptr mem) @@ -15791,11 +14759,9 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -15808,11 +14774,9 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVHstorezero) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -15825,15 
+14789,12 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 6 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem) v0.AuxInt = 4 - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [8] {t} destptr mem) @@ -15850,8 +14811,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { break } v.reset(OpPPC64MOVDstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) @@ -15864,12 +14824,10 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVWstorezero) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [12] {t} destptr mem) @@ -15887,12 +14845,10 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVWstorezero) v.AuxInt = 8 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [16] {t} destptr mem) @@ -15910,12 +14866,10 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 8 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [24] {t} destptr mem) @@ -15933,16 +14887,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 16 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, 
OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 8 - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [32] {t} destptr mem) @@ -15960,20 +14911,16 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 24 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 16 - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] ptr mem) @@ -15984,8 +14931,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredZero) v.AuxInt = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -16005,11 +14951,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16025,11 +14970,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (FlagEQ) yes no) @@ -16057,8 +15001,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] 
- b.Reset(BlockPPC64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64EQ, cmp) return true } // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16074,11 +15017,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16094,11 +15036,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(AND x y)) yes no) @@ -16122,11 +15063,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64EQ, v0) return true } break @@ -16152,11 +15091,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64EQ, v0) return true } break @@ -16182,11 +15119,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64EQ, v0) return true } break @@ -16216,8 +15151,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64LE) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64LE, cmp) return true } 
// match: (GE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16233,11 +15167,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } // match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16253,11 +15186,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } // match: (GE (CMPconst [0] z:(AND x y)) yes no) @@ -16281,11 +15213,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64GE, v0) return true } break @@ -16311,11 +15241,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64GE, v0) return true } break @@ -16341,11 +15269,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64GE, v0) return true } break @@ -16376,8 +15302,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64LT) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64LT, cmp) return true } // match: (GT (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16393,11 +15318,10 @@ func 
rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return true } // match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16413,11 +15337,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return true } // match: (GT (CMPconst [0] z:(AND x y)) yes no) @@ -16441,11 +15364,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64GT, v0) return true } break @@ -16471,11 +15392,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64GT, v0) return true } break @@ -16501,11 +15420,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64GT, v0) return true } break @@ -16516,8 +15433,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64EQ) - b.AddControl(cc) + b.resetWithControl(BlockPPC64EQ, cc) return true } // match: (If (NotEqual cc) yes no) @@ -16525,8 +15441,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - 
b.Reset(BlockPPC64NE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64NE, cc) return true } // match: (If (LessThan cc) yes no) @@ -16534,8 +15449,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64LT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LT, cc) return true } // match: (If (LessEqual cc) yes no) @@ -16543,8 +15457,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64LE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LE, cc) return true } // match: (If (GreaterThan cc) yes no) @@ -16552,8 +15465,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64GT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GT, cc) return true } // match: (If (GreaterEqual cc) yes no) @@ -16561,8 +15473,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64GE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GE, cc) return true } // match: (If (FLessThan cc) yes no) @@ -16570,8 +15481,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FLessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FLT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLT, cc) return true } // match: (If (FLessEqual cc) yes no) @@ -16579,8 +15489,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FLessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FLE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLE, cc) return true } // match: (If (FGreaterThan cc) yes no) @@ -16588,8 +15497,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FGreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FGT) - 
b.AddControl(cc) + b.resetWithControl(BlockPPC64FGT, cc) return true } // match: (If (FGreaterEqual cc) yes no) @@ -16597,19 +15505,17 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FGreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FGE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGE, cc) return true } // match: (If cond yes no) // result: (NE (CMPWconst [0] cond) yes no) for { cond := b.Controls[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(cond) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } case BlockPPC64LE: @@ -16637,8 +15543,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64GE) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64GE, cmp) return true } // match: (LE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16654,11 +15559,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } // match: (LE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16674,11 +15578,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } // match: (LE (CMPconst [0] z:(AND x y)) yes no) @@ -16702,11 +15605,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64LE, v0) return true } break @@ -16732,11 +15633,9 @@ func 
rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64LE, v0) return true } break @@ -16762,11 +15661,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64LE, v0) return true } break @@ -16797,8 +15694,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64GT) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64GT, cmp) return true } // match: (LT (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16814,11 +15710,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } // match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16834,11 +15729,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } // match: (LT (CMPconst [0] z:(AND x y)) yes no) @@ -16862,11 +15756,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64LT, v0) return true } break @@ -16892,11 +15784,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LT) v0 := 
b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64LT, v0) return true } break @@ -16922,11 +15812,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64LT, v0) return true } break @@ -16944,8 +15832,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) - b.AddControl(cc) + b.resetWithControl(BlockPPC64EQ, cc) return true } // match: (NE (CMPWconst [0] (NotEqual cc)) yes no) @@ -16960,8 +15847,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64NE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64NE, cc) return true } // match: (NE (CMPWconst [0] (LessThan cc)) yes no) @@ -16976,8 +15862,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64LT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LT, cc) return true } // match: (NE (CMPWconst [0] (LessEqual cc)) yes no) @@ -16992,8 +15877,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64LE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LE, cc) return true } // match: (NE (CMPWconst [0] (GreaterThan cc)) yes no) @@ -17008,8 +15892,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64GT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GT, cc) return true } // match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no) @@ -17024,8 +15907,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64GE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GE, cc) return true } // match: (NE (CMPWconst [0] (FLessThan cc)) yes no) @@ -17040,8 +15922,7 @@ func rewriteBlockPPC64(b *Block) bool { 
break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FLT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLT, cc) return true } // match: (NE (CMPWconst [0] (FLessEqual cc)) yes no) @@ -17056,8 +15937,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FLE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLE, cc) return true } // match: (NE (CMPWconst [0] (FGreaterThan cc)) yes no) @@ -17072,8 +15952,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FGT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGT, cc) return true } // match: (NE (CMPWconst [0] (FGreaterEqual cc)) yes no) @@ -17088,8 +15967,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FGE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGE, cc) return true } // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -17105,11 +15983,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -17125,11 +16002,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (FlagEQ) yes no) @@ -17156,8 +16032,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64NE) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64NE, cmp) return true } // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -17173,11 +16048,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - 
b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -17193,11 +16067,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (CMPconst [0] z:(AND x y)) yes no) @@ -17221,11 +16094,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64NE, v0) return true } break @@ -17251,11 +16122,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64NE, v0) return true } break @@ -17281,11 +16150,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) + v0.AddArg2(x, y) + b.resetWithControl(BlockPPC64NE, v0) return true } break diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index e4480dc366..676ca52e8e 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -114,6 +114,9 @@ func rewriteValueRISCV64(v *Value) bool { case OpCvt64to64F: v.Op = OpRISCV64FCVTDL return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueRISCV64_OpDiv16(v) case OpDiv16u: @@ -383,24 
+386,36 @@ func rewriteValueRISCV64(v *Value) bool { return rewriteValueRISCV64_OpRISCV64MOVBload(v) case OpRISCV64MOVBstore: return rewriteValueRISCV64_OpRISCV64MOVBstore(v) + case OpRISCV64MOVBstorezero: + return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v) case OpRISCV64MOVDconst: return rewriteValueRISCV64_OpRISCV64MOVDconst(v) case OpRISCV64MOVDload: return rewriteValueRISCV64_OpRISCV64MOVDload(v) case OpRISCV64MOVDstore: return rewriteValueRISCV64_OpRISCV64MOVDstore(v) + case OpRISCV64MOVDstorezero: + return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v) case OpRISCV64MOVHUload: return rewriteValueRISCV64_OpRISCV64MOVHUload(v) case OpRISCV64MOVHload: return rewriteValueRISCV64_OpRISCV64MOVHload(v) case OpRISCV64MOVHstore: return rewriteValueRISCV64_OpRISCV64MOVHstore(v) + case OpRISCV64MOVHstorezero: + return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v) case OpRISCV64MOVWUload: return rewriteValueRISCV64_OpRISCV64MOVWUload(v) case OpRISCV64MOVWload: return rewriteValueRISCV64_OpRISCV64MOVWload(v) case OpRISCV64MOVWstore: return rewriteValueRISCV64_OpRISCV64MOVWstore(v) + case OpRISCV64MOVWstorezero: + return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v) + case OpRISCV64SUB: + return rewriteValueRISCV64_OpRISCV64SUB(v) + case OpRISCV64SUBW: + return rewriteValueRISCV64_OpRISCV64SUBW(v) case OpRotateLeft16: return rewriteValueRISCV64_OpRotateLeft16(v) case OpRotateLeft32: @@ -587,19 +602,16 @@ func rewriteValueRISCV64_OpAvg64u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) v1.AuxInt = 1 v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) v2.AuxInt = 1 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t) v3.AuxInt = 1 v4 := b.NewValue0(v.Pos, OpRISCV64AND, t) - v4.AddArg(x) - v4.AddArg(y) + v4.AddArg2(x, y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -701,10 +713,9 @@ func rewriteValueRISCV64_OpDiv16(v *Value) bool { v.reset(OpRISCV64DIVW) v0 := 
b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -721,10 +732,9 @@ func rewriteValueRISCV64_OpDiv16u(v *Value) bool { v.reset(OpRISCV64DIVUW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -741,10 +751,9 @@ func rewriteValueRISCV64_OpDiv8(v *Value) bool { v.reset(OpRISCV64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -761,10 +770,9 @@ func rewriteValueRISCV64_OpDiv8u(v *Value) bool { v.reset(OpRISCV64DIVUW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -781,8 +789,7 @@ func rewriteValueRISCV64_OpEq16(v *Value) bool { v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -799,8 +806,7 @@ func rewriteValueRISCV64_OpEq32(v *Value) bool { y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -816,8 +822,7 @@ func rewriteValueRISCV64_OpEq64(v *Value) bool { y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -835,8 +840,7 @@ func rewriteValueRISCV64_OpEq8(v *Value) bool { v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - 
v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -855,8 +859,7 @@ func rewriteValueRISCV64_OpEqB(v *Value) bool { v.reset(OpRISCV64XORI) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -872,8 +875,7 @@ func rewriteValueRISCV64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -887,8 +889,7 @@ func rewriteValueRISCV64_OpGeq32F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLES) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -901,8 +902,7 @@ func rewriteValueRISCV64_OpGeq64F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLED) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -915,8 +915,7 @@ func rewriteValueRISCV64_OpGreater32F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLTS) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -929,8 +928,7 @@ func rewriteValueRISCV64_OpGreater64F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLTD) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -949,10 +947,9 @@ func rewriteValueRISCV64_OpHmul32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -972,10 +969,9 @@ func rewriteValueRISCV64_OpHmul32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -990,8 +986,7 @@ func rewriteValueRISCV64_OpIsNonNil(v *Value) bool { p := v_0 
v.reset(OpNeqPtr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v.AddArg(v0) - v.AddArg(p) + v.AddArg2(v0, p) return true } } @@ -1007,8 +1002,7 @@ func rewriteValueRISCV64_OpLeq16(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1025,8 +1019,7 @@ func rewriteValueRISCV64_OpLeq16U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1043,8 +1036,7 @@ func rewriteValueRISCV64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1061,8 +1053,7 @@ func rewriteValueRISCV64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1079,8 +1070,7 @@ func rewriteValueRISCV64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1097,8 +1087,7 @@ func rewriteValueRISCV64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1115,8 +1104,7 @@ func rewriteValueRISCV64_OpLeq8(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1133,8 +1121,7 @@ func rewriteValueRISCV64_OpLeq8U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1152,10 +1139,9 @@ func rewriteValueRISCV64_OpLess16(v *Value) bool { v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, 
OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1172,10 +1158,9 @@ func rewriteValueRISCV64_OpLess16U(v *Value) bool { v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1192,10 +1177,9 @@ func rewriteValueRISCV64_OpLess32(v *Value) bool { v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1212,10 +1196,9 @@ func rewriteValueRISCV64_OpLess32U(v *Value) bool { v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1232,10 +1215,9 @@ func rewriteValueRISCV64_OpLess8(v *Value) bool { v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1252,10 +1234,9 @@ func rewriteValueRISCV64_OpLess8U(v *Value) bool { v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1273,8 +1254,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1288,8 +1268,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1303,8 +1282,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1318,8 +1296,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1333,8 +1310,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1348,8 +1324,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1363,8 +1338,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1378,8 +1352,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1393,8 +1366,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64FMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1408,8 +1380,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1440,9 +1411,7 @@ func rewriteValueRISCV64_OpLsh16x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1450,7 +1419,7 @@ func 
rewriteValueRISCV64_OpLsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1467,9 +1436,7 @@ func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1477,7 +1444,7 @@ func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1493,15 +1460,13 @@ func rewriteValueRISCV64_OpLsh16x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1518,9 +1483,7 @@ func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1528,7 +1491,7 @@ func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1545,9 +1508,7 @@ func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1555,7 +1516,7 @@ func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1572,9 +1533,7 @@ func 
rewriteValueRISCV64_OpLsh32x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1582,7 +1541,7 @@ func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1598,15 +1557,13 @@ func rewriteValueRISCV64_OpLsh32x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1623,9 +1580,7 @@ func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1633,7 +1588,7 @@ func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1650,9 +1605,7 @@ func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1660,7 +1613,7 @@ func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1677,9 +1630,7 @@ func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 
:= b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1687,7 +1638,7 @@ func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1703,15 +1654,13 @@ func rewriteValueRISCV64_OpLsh64x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1728,9 +1677,7 @@ func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1738,7 +1685,7 @@ func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1755,9 +1702,7 @@ func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1765,7 +1710,7 @@ func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1782,9 +1727,7 @@ func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1792,7 +1735,7 @@ func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) 
v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1808,15 +1751,13 @@ func rewriteValueRISCV64_OpLsh8x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1833,9 +1774,7 @@ func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1843,7 +1782,7 @@ func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1860,10 +1799,9 @@ func rewriteValueRISCV64_OpMod16(v *Value) bool { v.reset(OpRISCV64REMW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1880,10 +1818,9 @@ func rewriteValueRISCV64_OpMod16u(v *Value) bool { v.reset(OpRISCV64REMUW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1900,10 +1837,9 @@ func rewriteValueRISCV64_OpMod8(v *Value) bool { v.reset(OpRISCV64REMW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1920,10 +1856,9 @@ func rewriteValueRISCV64_OpMod8u(v *Value) bool { v.reset(OpRISCV64REMUW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1941,9 +1876,7 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -1956,12 +1889,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -1974,12 +1904,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -1992,12 +1919,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -2010,12 +1934,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [s] {t} dst src mem) @@ -2028,13 +1949,10 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { mem := v_2 v.reset(OpRISCV64LoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type) v0.AuxInt = s - 
moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } } @@ -2051,10 +1969,9 @@ func rewriteValueRISCV64_OpMul16(v *Value) bool { v.reset(OpRISCV64MULW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2071,10 +1988,9 @@ func rewriteValueRISCV64_OpMul8(v *Value) bool { v.reset(OpRISCV64MULW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2088,8 +2004,7 @@ func rewriteValueRISCV64_OpNeg16(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2103,8 +2018,7 @@ func rewriteValueRISCV64_OpNeg32(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2118,8 +2032,7 @@ func rewriteValueRISCV64_OpNeg64(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2133,8 +2046,7 @@ func rewriteValueRISCV64_OpNeg8(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2151,8 +2063,7 @@ func rewriteValueRISCV64_OpNeq16(v *Value) bool { v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -2169,8 +2080,7 @@ func rewriteValueRISCV64_OpNeq32(v *Value) bool { y := v_1 
v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2186,8 +2096,7 @@ func rewriteValueRISCV64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2205,8 +2114,7 @@ func rewriteValueRISCV64_OpNeq8(v *Value) bool { v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -2223,8 +2131,7 @@ func rewriteValueRISCV64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2280,8 +2187,7 @@ func rewriteValueRISCV64_OpOffPtr(v *Value) bool { v.reset(OpRISCV64ADD) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -2302,9 +2208,7 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { } v.reset(OpRISCV64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -2320,9 +2224,7 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { } v.reset(OpRISCV64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -2338,9 +2240,7 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { } v.reset(OpRISCV64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -2399,9 +2299,7 @@ func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } return false @@ -2428,8 +2326,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool { v.reset(OpRISCV64MOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem) @@ -2450,8 +2347,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool { v.reset(OpRISCV64MOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2478,8 +2374,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool { v.reset(OpRISCV64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem) @@ -2500,8 +2395,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool { v.reset(OpRISCV64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2530,9 +2424,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { v.reset(OpRISCV64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem) @@ -2554,9 +2446,71 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { v.reset(OpRISCV64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + 
return false +} +func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVBstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) return true } return false @@ -2579,10 +2533,9 @@ func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool { v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = c>>32 + 1 v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v2.AuxInt = int64(int32(c)) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (MOVDconst [c]) @@ -2600,10 +2553,9 @@ func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool { v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = c>>32 + 0 v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v2.AuxInt = int64(int32(c)) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } return false @@ -2630,8 +2582,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { v.reset(OpRISCV64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - 
v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem) @@ -2652,8 +2603,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { v.reset(OpRISCV64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2682,9 +2632,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool { v.reset(OpRISCV64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) @@ -2706,9 +2654,71 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool { v.reset(OpRISCV64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVDstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) 
mem) + // cond: is32Bit(off1+off2) + // result: (MOVDstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) return true } return false @@ -2735,8 +2745,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { v.reset(OpRISCV64MOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem) @@ -2757,8 +2766,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { v.reset(OpRISCV64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2785,8 +2793,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool { v.reset(OpRISCV64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem) @@ -2807,8 +2814,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool { v.reset(OpRISCV64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2837,9 +2843,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { v.reset(OpRISCV64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem) @@ -2861,9 +2865,71 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { v.reset(OpRISCV64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) + return true + } + // match: 
(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVHstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) return true } return false @@ -2890,8 +2956,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { v.reset(OpRISCV64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem) @@ -2912,8 +2977,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { v.reset(OpRISCV64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ 
-2940,8 +3004,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool { v.reset(OpRISCV64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem) @@ -2962,8 +3025,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool { v.reset(OpRISCV64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2992,9 +3054,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { v.reset(OpRISCV64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem) @@ -3016,9 +3076,201 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { v.reset(OpRISCV64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + 
v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUB x (MOVBconst [val])) + // cond: is32Bit(-val) + // result: (ADDI [-val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVBconst { + break + } + val := v_1.AuxInt + if !(is32Bit(-val)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = -val + v.AddArg(x) + return true + } + // match: (SUB x (MOVHconst [val])) + // cond: is32Bit(-val) + // result: (ADDI [-val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVHconst { + break + } + val := v_1.AuxInt + if !(is32Bit(-val)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = -val + v.AddArg(x) + return true + } + // match: (SUB x (MOVWconst [val])) + // cond: is32Bit(-val) + // result: (ADDI [-val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVWconst { + break + } + val := v_1.AuxInt + if !(is32Bit(-val)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = -val + v.AddArg(x) + return true + } + // match: (SUB x (MOVDconst [val])) + // cond: is32Bit(-val) + // result: (ADDI [-val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := v_1.AuxInt + if !(is32Bit(-val)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = -val + v.AddArg(x) + return true + } + // match: (SUB x (MOVBconst [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 { + break + } + v.copyOf(x) + 
return true + } + // match: (SUB x (MOVHconst [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 { + break + } + v.copyOf(x) + return true + } + // match: (SUB x (MOVWconst [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 { + break + } + v.copyOf(x) + return true + } + // match: (SUB x (MOVDconst [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBW x (MOVWconst [0])) + // result: (ADDIW [0] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 { + break + } + v.reset(OpRISCV64ADDIW) + v.AuxInt = 0 + v.AddArg(x) return true } return false @@ -3039,17 +3291,14 @@ func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3070,17 +3319,14 @@ func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool { c := v_1.AuxInt v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) v1.AuxInt = c & 31 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) v3.AuxInt = -c & 31 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3101,17 +3347,14 @@ func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool 
{ c := v_1.AuxInt v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = c & 63 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v3.AuxInt = -c & 63 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3132,17 +3375,14 @@ func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3162,9 +3402,7 @@ func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3172,7 +3410,7 @@ func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3191,9 +3429,7 @@ func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3201,7 +3437,7 @@ func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ 
-3220,15 +3456,13 @@ func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3247,9 +3481,7 @@ func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3257,7 +3489,7 @@ func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3276,9 +3508,7 @@ func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3287,8 +3517,8 @@ func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3307,9 +3537,7 @@ func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3318,8 +3546,8 @@ func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + 
v.AddArg2(v0, v1) return true } } @@ -3338,17 +3566,15 @@ func rewriteValueRISCV64_OpRsh16x64(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3367,9 +3593,7 @@ func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3378,8 +3602,8 @@ func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3398,9 +3622,7 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3408,7 +3630,7 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3427,9 +3649,7 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3437,7 +3657,7 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { v4.AddArg(y) 
v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3456,15 +3676,13 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3483,9 +3701,7 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3493,7 +3709,7 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3512,9 +3728,7 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3523,8 +3737,8 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3543,9 +3757,7 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3554,8 +3766,8 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { v4.AddArg(y) 
v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3574,17 +3786,15 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3603,9 +3813,7 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3614,8 +3822,8 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3632,9 +3840,7 @@ func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -3642,7 +3848,7 @@ func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3659,9 +3865,7 @@ func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -3669,7 +3873,7 @@ func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { 
v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3685,15 +3889,13 @@ func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3710,9 +3912,7 @@ func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -3720,7 +3920,7 @@ func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3737,9 +3937,7 @@ func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3748,8 +3946,8 @@ func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3766,9 +3964,7 @@ func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3777,8 +3973,8 @@ func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3794,17 +3990,15 @@ func 
rewriteValueRISCV64_OpRsh64x64(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3821,9 +4015,7 @@ func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3832,8 +4024,8 @@ func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3852,9 +4044,7 @@ func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3862,7 +4052,7 @@ func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3881,9 +4071,7 @@ func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3891,7 +4079,7 @@ func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3910,15 +4098,13 @@ func rewriteValueRISCV64_OpRsh8Ux64(v 
*Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3937,9 +4123,7 @@ func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3947,7 +4131,7 @@ func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3966,9 +4150,7 @@ func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3977,8 +4159,8 @@ func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3997,9 +4179,7 @@ func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -4008,8 +4188,8 @@ func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4028,17 +4208,15 @@ func 
rewriteValueRISCV64_OpRsh8x64(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4057,9 +4235,7 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -4068,8 +4244,8 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4206,9 +4382,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4223,9 +4397,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4240,9 +4412,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4257,9 +4427,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4274,9 +4442,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } 
v.reset(OpRISCV64FMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4291,9 +4457,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4311,9 +4475,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) @@ -4325,10 +4487,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -4340,10 +4500,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] ptr mem) @@ -4355,10 +4513,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] ptr mem) @@ -4370,10 +4526,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVDstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [s] {t} ptr mem) @@ -4385,14 +4539,11 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { mem := v_1 v.reset(OpRISCV64LoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type) - v0.AddArg(ptr) v1 := 
b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) - v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(ptr, v1) + v.AddArg3(ptr, v0, mem) return true } } @@ -4500,13 +4651,21 @@ func rewriteValueRISCV64_OpZeroExt8to64(v *Value) bool { } func rewriteBlockRISCV64(b *Block) bool { switch b.Kind { + case BlockRISCV64BNE: + // match: (BNE (SNEZ x) yes no) + // result: (BNE x yes no) + for b.Controls[0].Op == OpRISCV64SNEZ { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BNE, x) + return true + } case BlockIf: // match: (If cond yes no) // result: (BNE cond yes no) for { cond := b.Controls[0] - b.Reset(BlockRISCV64BNE) - b.AddControl(cond) + b.resetWithControl(BlockRISCV64BNE, cond) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 43ca2ceb92..7c750574bc 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -156,12 +156,24 @@ func rewriteValueS390X(v *Value) bool { case OpCvt32Fto32: v.Op = OpS390XCFEBRA return true + case OpCvt32Fto32U: + v.Op = OpS390XCLFEBR + return true case OpCvt32Fto64: v.Op = OpS390XCGEBRA return true case OpCvt32Fto64F: v.Op = OpS390XLDEBR return true + case OpCvt32Fto64U: + v.Op = OpS390XCLGEBR + return true + case OpCvt32Uto32F: + v.Op = OpS390XCELFBR + return true + case OpCvt32Uto64F: + v.Op = OpS390XCDLFBR + return true case OpCvt32to32F: v.Op = OpS390XCEFBRA return true @@ -174,15 +186,30 @@ func rewriteValueS390X(v *Value) bool { case OpCvt64Fto32F: v.Op = OpS390XLEDBR return true + case OpCvt64Fto32U: + v.Op = OpS390XCLFDBR + return true case OpCvt64Fto64: v.Op = OpS390XCGDBRA return true + case OpCvt64Fto64U: + v.Op = OpS390XCLGDBR + return true + case OpCvt64Uto32F: + v.Op = OpS390XCELGBR + return true + case OpCvt64Uto64F: + v.Op = OpS390XCDLGBR + return true case OpCvt64to32F: v.Op = OpS390XCEGBRA return 
true case OpCvt64to64F: v.Op = OpS390XCDGBRA return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueS390X_OpDiv16(v) case OpDiv16u: @@ -909,12 +936,9 @@ func rewriteValueS390X_OpAtomicAdd32(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XAddTupleFirst32) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(ptr, val, mem) + v.AddArg2(val, v0) return true } } @@ -931,12 +955,9 @@ func rewriteValueS390X_OpAtomicAdd64(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XAddTupleFirst64) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(ptr, val, mem) + v.AddArg2(val, v0) return true } } @@ -953,21 +974,17 @@ func rewriteValueS390X_OpAtomicAnd8(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XLANfloor) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32) v1 := b.NewValue0(v.Pos, OpS390XORWconst, typ.UInt32) v1.AuxInt = -1 << 8 v1.AddArg(val) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32) v2.Aux = s390x.NewRotateParams(59, 60, 3) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 3 << 3 - v2.AddArg(v3) - v2.AddArg(ptr) - v0.AddArg(v2) - v.AddArg(v0) - v.AddArg(mem) + v2.AddArg2(v3, ptr) + v0.AddArg2(v1, v2) + v.AddArg3(ptr, v0, mem) return true } } @@ -984,20 +1001,16 @@ func rewriteValueS390X_OpAtomicOr8(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XLAOfloor) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt32) v1.AddArg(val) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32) v2.Aux = s390x.NewRotateParams(59, 60, 3) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 3 << 3 - v2.AddArg(v3) - v2.AddArg(ptr) - 
v0.AddArg(v2) - v.AddArg(v0) - v.AddArg(mem) + v2.AddArg2(v3, ptr) + v0.AddArg2(v1, v2) + v.AddArg3(ptr, v0, mem) return true } } @@ -1014,9 +1027,7 @@ func rewriteValueS390X_OpAtomicStore32(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1034,9 +1045,7 @@ func rewriteValueS390X_OpAtomicStore64(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1054,9 +1063,7 @@ func rewriteValueS390X_OpAtomicStore8(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1074,9 +1081,7 @@ func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1095,11 +1100,9 @@ func rewriteValueS390X_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpS390XSUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1114,10 +1117,9 @@ func rewriteValueS390X_OpBitLen64(v *Value) bool { v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1154,20 +1156,18 @@ func rewriteValueS390X_OpCtz32(v *Value) bool { v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) 
v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XANDW, t) v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t) v4.AuxInt = 1 v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpS390XNOTW, t) v5.AddArg(x) - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1183,18 +1183,16 @@ func rewriteValueS390X_OpCtz64(v *Value) bool { v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpS390XAND, t) v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t) v3.AuxInt = 1 v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XNOT, t) v4.AddArg(x) - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1211,10 +1209,9 @@ func rewriteValueS390X_OpDiv16(v *Value) bool { v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1231,10 +1228,9 @@ func rewriteValueS390X_OpDiv16u(v *Value) bool { v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1251,8 +1247,7 @@ func rewriteValueS390X_OpDiv32(v *Value) bool { v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1269,8 +1264,7 @@ func rewriteValueS390X_OpDiv32u(v *Value) bool { v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1287,10 +1281,9 @@ func 
rewriteValueS390X_OpDiv8(v *Value) bool { v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1307,10 +1300,9 @@ func rewriteValueS390X_OpDiv8u(v *Value) bool { v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1328,18 +1320,15 @@ func rewriteValueS390X_OpEq16(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1357,14 +1346,11 @@ func rewriteValueS390X_OpEq32(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1382,14 +1368,11 @@ func rewriteValueS390X_OpEq32F(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1407,14 +1390,11 @@ func rewriteValueS390X_OpEq64(v 
*Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1432,14 +1412,11 @@ func rewriteValueS390X_OpEq64F(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1457,18 +1434,15 @@ func rewriteValueS390X_OpEq8(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1486,18 +1460,15 @@ func rewriteValueS390X_OpEqB(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1515,14 +1486,11 @@ func rewriteValueS390X_OpEqPtr(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, 
OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1537,9 +1505,7 @@ func rewriteValueS390X_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpS390XFMADD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -1569,14 +1535,11 @@ func rewriteValueS390X_OpGeq32F(v *Value) bool { v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1594,14 +1557,11 @@ func rewriteValueS390X_OpGeq64F(v *Value) bool { v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1619,14 +1579,11 @@ func rewriteValueS390X_OpGreater32F(v *Value) bool { v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1644,14 +1601,11 @@ func rewriteValueS390X_OpGreater64F(v *Value) bool { v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, 
typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1670,10 +1624,9 @@ func rewriteValueS390X_OpHmul32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1693,10 +1646,9 @@ func rewriteValueS390X_OpHmul32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1712,8 +1664,7 @@ func rewriteValueS390X_OpITab(v *Value) bool { mem := v_0.Args[1] ptr := v_0.Args[0] v.reset(OpS390XMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1732,14 +1683,11 @@ func rewriteValueS390X_OpIsInBounds(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(idx) - v2.AddArg(len) - v.AddArg(v2) + v2.AddArg2(idx, len) + v.AddArg3(v0, v1, v2) return true } } @@ -1755,14 +1703,12 @@ func rewriteValueS390X_OpIsNonNil(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags) v2.AuxInt = 0 v2.AddArg(p) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -1780,14 +1726,11 @@ func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool 
{ v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(idx) - v2.AddArg(len) - v.AddArg(v2) + v2.AddArg2(idx, len) + v.AddArg3(v0, v1, v2) return true } } @@ -1805,18 +1748,15 @@ func rewriteValueS390X_OpLeq16(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1834,18 +1774,15 @@ func rewriteValueS390X_OpLeq16U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1863,14 +1800,11 @@ func rewriteValueS390X_OpLeq32(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1888,14 +1822,11 @@ func rewriteValueS390X_OpLeq32F(v *Value) bool { v.Aux = 
s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1913,14 +1844,11 @@ func rewriteValueS390X_OpLeq32U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1938,14 +1866,11 @@ func rewriteValueS390X_OpLeq64(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1963,14 +1888,11 @@ func rewriteValueS390X_OpLeq64F(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1988,14 +1910,11 @@ func rewriteValueS390X_OpLeq64U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + 
v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2013,18 +1932,15 @@ func rewriteValueS390X_OpLeq8(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2042,18 +1958,15 @@ func rewriteValueS390X_OpLeq8U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2071,18 +1984,15 @@ func rewriteValueS390X_OpLess16(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2100,18 +2010,15 @@ func rewriteValueS390X_OpLess16U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := 
b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2129,14 +2036,11 @@ func rewriteValueS390X_OpLess32(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2154,14 +2058,11 @@ func rewriteValueS390X_OpLess32F(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2179,14 +2080,11 @@ func rewriteValueS390X_OpLess32U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2204,14 +2102,11 @@ func rewriteValueS390X_OpLess64(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2229,14 +2124,11 @@ func 
rewriteValueS390X_OpLess64F(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2254,14 +2146,11 @@ func rewriteValueS390X_OpLess64U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2279,18 +2168,15 @@ func rewriteValueS390X_OpLess8(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2308,18 +2194,15 @@ func rewriteValueS390X_OpLess8U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2337,8 +2220,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } 
v.reset(OpS390XMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2352,8 +2234,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2367,8 +2248,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVWZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2382,8 +2262,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2397,8 +2276,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVHZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2412,8 +2290,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2427,8 +2304,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVBZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2442,8 +2318,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XFMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2457,8 +2332,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XFMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2491,8 +2365,7 @@ func rewriteValueS390X_OpLsh16x16(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x16 x y) @@ -2505,18 +2378,15 @@ func rewriteValueS390X_OpLsh16x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, 
OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2535,8 +2405,7 @@ func rewriteValueS390X_OpLsh16x32(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x32 x y) @@ -2549,16 +2418,13 @@ func rewriteValueS390X_OpLsh16x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2577,8 +2443,7 @@ func rewriteValueS390X_OpLsh16x64(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x64 x y) @@ -2591,16 +2456,13 @@ func rewriteValueS390X_OpLsh16x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2619,8 +2481,7 @@ func rewriteValueS390X_OpLsh16x8(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x8 x y) @@ -2633,18 +2494,15 @@ func rewriteValueS390X_OpLsh16x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - 
v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2663,8 +2521,7 @@ func rewriteValueS390X_OpLsh32x16(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x16 x y) @@ -2677,18 +2534,15 @@ func rewriteValueS390X_OpLsh32x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2707,8 +2561,7 @@ func rewriteValueS390X_OpLsh32x32(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x32 x y) @@ -2721,16 +2574,13 @@ func rewriteValueS390X_OpLsh32x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2749,8 +2599,7 @@ func rewriteValueS390X_OpLsh32x64(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x64 x y) @@ -2763,16 +2612,13 @@ func rewriteValueS390X_OpLsh32x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := 
b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2791,8 +2637,7 @@ func rewriteValueS390X_OpLsh32x8(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x8 x y) @@ -2805,18 +2650,15 @@ func rewriteValueS390X_OpLsh32x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2835,8 +2677,7 @@ func rewriteValueS390X_OpLsh64x16(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x16 x y) @@ -2849,18 +2690,15 @@ func rewriteValueS390X_OpLsh64x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2879,8 +2717,7 @@ func rewriteValueS390X_OpLsh64x32(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x32 x y) @@ -2893,16 +2730,13 @@ func rewriteValueS390X_OpLsh64x32(v *Value) bool { v.Type = t 
v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2921,8 +2755,7 @@ func rewriteValueS390X_OpLsh64x64(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x64 x y) @@ -2935,16 +2768,13 @@ func rewriteValueS390X_OpLsh64x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2963,8 +2793,7 @@ func rewriteValueS390X_OpLsh64x8(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x8 x y) @@ -2977,18 +2806,15 @@ func rewriteValueS390X_OpLsh64x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3007,8 +2833,7 @@ func rewriteValueS390X_OpLsh8x16(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x16 x y) @@ -3021,18 +2846,15 @@ func rewriteValueS390X_OpLsh8x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := 
b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3051,8 +2873,7 @@ func rewriteValueS390X_OpLsh8x32(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x32 x y) @@ -3065,16 +2886,13 @@ func rewriteValueS390X_OpLsh8x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3093,8 +2911,7 @@ func rewriteValueS390X_OpLsh8x64(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x64 x y) @@ -3107,16 +2924,13 @@ func rewriteValueS390X_OpLsh8x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3135,8 +2949,7 @@ func rewriteValueS390X_OpLsh8x8(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x8 x y) @@ -3149,18 +2962,15 @@ func rewriteValueS390X_OpLsh8x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - 
v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3177,10 +2987,9 @@ func rewriteValueS390X_OpMod16(v *Value) bool { v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3197,10 +3006,9 @@ func rewriteValueS390X_OpMod16u(v *Value) bool { v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3217,8 +3025,7 @@ func rewriteValueS390X_OpMod32(v *Value) bool { v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3235,8 +3042,7 @@ func rewriteValueS390X_OpMod32u(v *Value) bool { v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3253,10 +3059,9 @@ func rewriteValueS390X_OpMod8(v *Value) bool { v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3273,10 +3078,9 @@ func rewriteValueS390X_OpMod8u(v *Value) bool { v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3293,9 +3097,7 @@ func 
rewriteValueS390X_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -3308,12 +3110,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -3326,12 +3125,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -3344,12 +3140,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -3362,12 +3155,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -3381,20 +3171,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVDstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) - v2.AddArg(src) 
- v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [24] dst src mem) @@ -3408,29 +3192,20 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVDstore) v.AuxInt = 16 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -3444,20 +3219,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -3471,20 +3240,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, 
OpS390XMOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -3498,20 +3261,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -3525,29 +3282,20 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -3563,9 +3311,7 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s, 0) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, 
mem) return true } // match: (Move [s] dst src mem) @@ -3581,14 +3327,10 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s-256, 256) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v0.AuxInt = makeValAndOff(256, 0) - v0.AddArg(dst) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(dst, src, mem) + v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) @@ -3604,19 +3346,13 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s-512, 512) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v0.AuxInt = makeValAndOff(256, 256) - v0.AddArg(dst) - v0.AddArg(src) v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v1.AuxInt = makeValAndOff(256, 0) - v1.AddArg(dst) - v1.AddArg(src) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg3(dst, src, mem) + v0.AddArg3(dst, src, v1) + v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) @@ -3632,24 +3368,16 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s-768, 768) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v0.AuxInt = makeValAndOff(256, 512) - v0.AddArg(dst) - v0.AddArg(src) v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v1.AuxInt = makeValAndOff(256, 256) - v1.AddArg(dst) - v1.AddArg(src) v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v2.AuxInt = makeValAndOff(256, 0) - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg3(dst, src, mem) + v1.AddArg3(dst, src, v2) + v0.AddArg3(dst, src, v1) + v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) @@ -3665,15 +3393,11 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XLoweredMove) v.AuxInt = s % 256 - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type) - 
v0.AddArg(src) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = (s / 256) * 256 - v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, v1) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -3692,18 +3416,15 @@ func rewriteValueS390X_OpNeq16(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -3721,14 +3442,11 @@ func rewriteValueS390X_OpNeq32(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3746,14 +3464,11 @@ func rewriteValueS390X_OpNeq32F(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3771,14 +3486,11 @@ func rewriteValueS390X_OpNeq64(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + 
v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3796,14 +3508,11 @@ func rewriteValueS390X_OpNeq64F(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3821,18 +3530,15 @@ func rewriteValueS390X_OpNeq8(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -3850,18 +3556,15 @@ func rewriteValueS390X_OpNeqB(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -3879,14 +3582,11 @@ func rewriteValueS390X_OpNeqPtr(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) 
return true } } @@ -3941,8 +3641,7 @@ func rewriteValueS390X_OpOffPtr(v *Value) bool { v.reset(OpS390XADD) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -3963,9 +3662,7 @@ func rewriteValueS390X_OpPanicBounds(v *Value) bool { } v.reset(OpS390XLoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -3981,9 +3678,7 @@ func rewriteValueS390X_OpPanicBounds(v *Value) bool { } v.reset(OpS390XLoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -3999,9 +3694,7 @@ func rewriteValueS390X_OpPanicBounds(v *Value) bool { } v.reset(OpS390XLoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -4088,17 +3781,14 @@ func rewriteValueS390X_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -4119,17 +3809,14 @@ func rewriteValueS390X_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return 
false @@ -4175,8 +3862,7 @@ func rewriteValueS390X_OpRsh16Ux16(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux16 x y) @@ -4191,18 +3877,15 @@ func rewriteValueS390X_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4223,8 +3906,7 @@ func rewriteValueS390X_OpRsh16Ux32(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux32 x y) @@ -4239,16 +3921,13 @@ func rewriteValueS390X_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4269,8 +3948,7 @@ func rewriteValueS390X_OpRsh16Ux64(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux64 x y) @@ -4285,16 +3963,13 @@ func rewriteValueS390X_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - 
v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4315,8 +3990,7 @@ func rewriteValueS390X_OpRsh16Ux8(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux8 x y) @@ -4331,18 +4005,15 @@ func rewriteValueS390X_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4363,8 +4034,7 @@ func rewriteValueS390X_OpRsh16x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x16 x y) @@ -4375,20 +4045,17 @@ func rewriteValueS390X_OpRsh16x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4409,8 +4076,7 @@ func rewriteValueS390X_OpRsh16x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := 
b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x32 x y) @@ -4421,18 +4087,15 @@ func rewriteValueS390X_OpRsh16x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4453,8 +4116,7 @@ func rewriteValueS390X_OpRsh16x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x64 x y) @@ -4465,18 +4127,15 @@ func rewriteValueS390X_OpRsh16x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4497,8 +4156,7 @@ func rewriteValueS390X_OpRsh16x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x8 x y) @@ -4509,20 +4167,17 @@ func rewriteValueS390X_OpRsh16x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, 
OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4541,8 +4196,7 @@ func rewriteValueS390X_OpRsh32Ux16(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux16 x y) @@ -4555,18 +4209,15 @@ func rewriteValueS390X_OpRsh32Ux16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4585,8 +4236,7 @@ func rewriteValueS390X_OpRsh32Ux32(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux32 x y) @@ -4599,16 +4249,13 @@ func rewriteValueS390X_OpRsh32Ux32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4627,8 +4274,7 @@ func rewriteValueS390X_OpRsh32Ux64(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux64 x y) @@ -4641,16 +4287,13 @@ func rewriteValueS390X_OpRsh32Ux64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - 
v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4669,8 +4312,7 @@ func rewriteValueS390X_OpRsh32Ux8(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux8 x y) @@ -4683,18 +4325,15 @@ func rewriteValueS390X_OpRsh32Ux8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4713,8 +4352,7 @@ func rewriteValueS390X_OpRsh32x16(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x16 x y) @@ -4723,20 +4361,17 @@ func rewriteValueS390X_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4754,8 +4389,7 @@ func rewriteValueS390X_OpRsh32x32(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x32 x y) @@ -4764,18 +4398,15 @@ func rewriteValueS390X_OpRsh32x32(v *Value) bool { x := v_0 y 
:= v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4793,8 +4424,7 @@ func rewriteValueS390X_OpRsh32x64(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x64 x y) @@ -4803,18 +4433,15 @@ func rewriteValueS390X_OpRsh32x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4833,8 +4460,7 @@ func rewriteValueS390X_OpRsh32x8(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x8 x y) @@ -4843,20 +4469,17 @@ func rewriteValueS390X_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4875,8 +4498,7 @@ func rewriteValueS390X_OpRsh64Ux16(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux16 x y) @@ -4889,18 
+4511,15 @@ func rewriteValueS390X_OpRsh64Ux16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4919,8 +4538,7 @@ func rewriteValueS390X_OpRsh64Ux32(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux32 x y) @@ -4933,16 +4551,13 @@ func rewriteValueS390X_OpRsh64Ux32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4961,8 +4576,7 @@ func rewriteValueS390X_OpRsh64Ux64(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux64 x y) @@ -4975,16 +4589,13 @@ func rewriteValueS390X_OpRsh64Ux64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -5003,8 +4614,7 @@ func rewriteValueS390X_OpRsh64Ux8(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux8 x y) @@ -5017,18 +4627,15 @@ func 
rewriteValueS390X_OpRsh64Ux8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -5047,8 +4654,7 @@ func rewriteValueS390X_OpRsh64x16(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x16 x y) @@ -5057,20 +4663,17 @@ func rewriteValueS390X_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5088,8 +4691,7 @@ func rewriteValueS390X_OpRsh64x32(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x32 x y) @@ -5098,18 +4700,15 @@ func rewriteValueS390X_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5127,8 +4726,7 @@ func rewriteValueS390X_OpRsh64x64(v *Value) bool { break } v.reset(OpS390XSRAD) - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x64 x y) @@ -5137,18 +4735,15 @@ func rewriteValueS390X_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5167,8 +4762,7 @@ func rewriteValueS390X_OpRsh64x8(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x8 x y) @@ -5177,20 +4771,17 @@ func rewriteValueS390X_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5211,8 +4802,7 @@ func rewriteValueS390X_OpRsh8Ux16(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux16 x y) @@ -5227,18 +4817,15 @@ func rewriteValueS390X_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, 
typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5259,8 +4846,7 @@ func rewriteValueS390X_OpRsh8Ux32(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux32 x y) @@ -5275,16 +4861,13 @@ func rewriteValueS390X_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5305,8 +4888,7 @@ func rewriteValueS390X_OpRsh8Ux64(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux64 x y) @@ -5321,16 +4903,13 @@ func rewriteValueS390X_OpRsh8Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5351,8 +4930,7 @@ func rewriteValueS390X_OpRsh8Ux8(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux8 x y) @@ -5367,18 +4945,15 @@ func rewriteValueS390X_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - 
v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5399,8 +4974,7 @@ func rewriteValueS390X_OpRsh8x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x16 x y) @@ -5411,20 +4985,17 @@ func rewriteValueS390X_OpRsh8x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5445,8 +5016,7 @@ func rewriteValueS390X_OpRsh8x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x32 x y) @@ -5457,18 +5027,15 @@ func rewriteValueS390X_OpRsh8x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5489,8 +5056,7 @@ func rewriteValueS390X_OpRsh8x64(v 
*Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x64 x y) @@ -5501,18 +5067,15 @@ func rewriteValueS390X_OpRsh8x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5533,8 +5096,7 @@ func rewriteValueS390X_OpRsh8x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x8 x y) @@ -5545,20 +5107,17 @@ func rewriteValueS390X_OpRsh8x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5627,8 +5186,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) + v.AddArg2(ptr, idx) return true } break @@ -5643,8 +5201,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -5671,9 +5228,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool 
{ v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -5718,8 +5273,7 @@ func rewriteValueS390X_OpS390XADDE(v *Value) bool { break } v.reset(OpS390XADDC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDE x y (FlagLT)) @@ -5731,8 +5285,7 @@ func rewriteValueS390X_OpS390XADDE(v *Value) bool { break } v.reset(OpS390XADDC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c))))) @@ -5765,9 +5318,7 @@ func rewriteValueS390X_OpS390XADDE(v *Value) bool { break } v.reset(OpS390XADDE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.AddArg3(x, y, c) return true } return false @@ -5825,8 +5376,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSUBW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -5853,9 +5403,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -5882,9 +5430,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -5902,9 +5448,7 @@ func rewriteValueS390X_OpS390XADDWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDWconst [c] (MOVDconst [d])) @@ -5958,9 +5502,7 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool { v.reset(OpS390XADDWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -5983,9 +5525,7 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool { v.reset(OpS390XADDWload) v.AuxInt = o1 + 
o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6050,8 +5590,7 @@ func rewriteValueS390X_OpS390XADDconst(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDconst [0] x) @@ -6061,9 +5600,7 @@ func rewriteValueS390X_OpS390XADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVDconst [d])) @@ -6115,17 +5652,15 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XADD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -6147,9 +5682,7 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { v.reset(OpS390XADDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6172,9 +5705,7 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { v.reset(OpS390XADDload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6311,9 +5842,7 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) @@ -6338,9 +5867,7 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -6373,9 
+5900,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) @@ -6400,9 +5925,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -6429,9 +5952,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -6497,9 +6018,7 @@ func rewriteValueS390X_OpS390XANDWconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDWconst [c] (MOVDconst [d])) @@ -6539,9 +6058,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool { v.reset(OpS390XANDWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6564,9 +6081,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool { v.reset(OpS390XANDWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6604,9 +6119,7 @@ func rewriteValueS390X_OpS390XANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVDconst [d])) @@ -6640,17 +6153,15 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XAND) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) 
return true } // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -6672,9 +6183,7 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { v.reset(OpS390XANDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6697,9 +6206,7 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { v.reset(OpS390XANDload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6755,8 +6262,7 @@ func rewriteValueS390X_OpS390XCMP(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -6813,8 +6319,7 @@ func rewriteValueS390X_OpS390XCMPU(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7034,8 +6539,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7048,8 +6552,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW x (MOVWZreg y)) @@ -7061,8 +6564,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW (MOVWreg x) y) @@ -7074,8 +6576,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW (MOVWZreg x) y) @@ -7087,8 +6588,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool 
{ x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -7136,8 +6636,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7150,8 +6649,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU x (MOVWZreg y)) @@ -7163,8 +6661,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU (MOVWreg x) y) @@ -7176,8 +6673,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU (MOVWZreg x) y) @@ -7189,8 +6685,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -7707,9 +7202,7 @@ func rewriteValueS390X_OpS390XFADD(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -7730,9 +7223,7 @@ func rewriteValueS390X_OpS390XFADDS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -7752,9 +7243,8 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { if v_1.Op != OpS390XMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -7772,15 +7262,12 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { if v_1.Op != OpS390XFMOVDstore || 
v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -7801,8 +7288,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -7824,8 +7310,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -7848,9 +7333,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) @@ -7875,9 +7358,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -7907,9 +7388,7 @@ func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) @@ -7931,9 +7410,7 @@ func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7961,9 +7438,7 @@ func 
rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -7986,9 +7461,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -8012,10 +7485,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -8041,10 +7511,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -8076,10 +7543,7 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) @@ -8102,10 +7566,7 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -8123,15 +7584,12 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { if v_1.Op != OpS390XFMOVSstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + 
ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -8152,8 +7610,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -8175,8 +7632,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -8199,9 +7655,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSload [off] {sym} (ADD ptr idx) mem) @@ -8226,9 +7680,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8258,9 +7710,7 @@ func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) @@ -8282,9 +7732,7 @@ func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8312,9 +7760,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) 
- v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -8337,9 +7783,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -8363,10 +7807,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) @@ -8392,10 +7833,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -8427,10 +7865,7 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) @@ -8453,10 +7888,7 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -8526,9 +7958,7 @@ func rewriteValueS390X_OpS390XFSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMSUB) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -8546,9 +7976,7 @@ func rewriteValueS390X_OpS390XFSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMSUBS) - v.AddArg(x) 
- v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -8643,14 +8071,12 @@ func rewriteValueS390X_OpS390XLDGR(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x.Pos, OpS390XLDGR, t) v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1) v2.AuxInt = off v2.Aux = sym - v2.AddArg(ptr) - v2.AddArg(mem) + v2.AddArg2(ptr, mem) v1.AddArg(v2) v0.AddArg(v1) return true @@ -8662,9 +8088,7 @@ func rewriteValueS390X_OpS390XLDGR(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8712,9 +8136,7 @@ func rewriteValueS390X_OpS390XLGDR(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8735,9 +8157,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { cmp := v_2.Args[0] v.reset(OpS390XLOCGR) v.Aux = c.(s390x.CCMask).ReverseComparison() - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) + v.AddArg3(x, y, cmp) return true } // match: (LOCGR {c} _ x (FlagEQ)) @@ -8749,9 +8169,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagEQ || !(c.(s390x.CCMask)&s390x.Equal != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} _ x (FlagLT)) @@ -8763,9 +8181,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagLT || !(c.(s390x.CCMask)&s390x.Less != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} _ x (FlagGT)) @@ -8777,9 +8193,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagGT || !(c.(s390x.CCMask)&s390x.Greater != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} _ x (FlagOV)) @@ -8791,9 +8205,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if 
v_2.Op != OpS390XFlagOV || !(c.(s390x.CCMask)&s390x.Unordered != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagEQ)) @@ -8805,9 +8217,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagEQ || !(c.(s390x.CCMask)&s390x.Equal == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagLT)) @@ -8819,9 +8229,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagLT || !(c.(s390x.CCMask)&s390x.Less == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagGT)) @@ -8833,9 +8241,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagGT || !(c.(s390x.CCMask)&s390x.Greater == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagOV)) @@ -8847,9 +8253,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagOV || !(c.(s390x.CCMask)&s390x.Unordered == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8863,9 +8267,7 @@ func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool { if x.Op != OpS390XFMOVSconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8879,9 +8281,7 @@ func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool { if x.Op != OpS390XFMOVDconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8899,9 +8299,8 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { if v_1.Op != OpS390XMOVBstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -8927,8 +8326,7 @@ func 
rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -8950,8 +8348,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -8974,9 +8371,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBZload [off] {sym} (ADD ptr idx) mem) @@ -9001,9 +8396,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9034,9 +8427,7 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9061,9 +8452,7 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9175,16 +8564,10 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) @@ -9192,16 
+8575,10 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(MOVBload [o] {s} p mem)) @@ -9222,12 +8599,10 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVBZreg x:(MOVBloadidx [o] {s} p i mem)) @@ -9249,13 +8624,10 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVBZreg x:(Arg )) @@ -9270,9 +8642,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if !(!t.IsSigned() && t.Size() == 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg (MOVDconst [c])) @@ -9294,7 +8664,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if x.Op != OpS390XLOCGR { break } - _ = x.Args[2] + _ = x.Args[1] x_0 := x.Args[0] if x_0.Op != OpS390XMOVDconst { break @@ -9308,9 +8678,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if !(int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg (ANDWconst [m] x)) @@ -9343,9 +8711,8 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { if v_1.Op != OpS390XMOVBstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = 
v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -9371,8 +8738,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -9394,8 +8760,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -9418,9 +8783,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off] {sym} (ADD ptr idx) mem) @@ -9445,9 +8808,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9478,9 +8839,7 @@ func rewriteValueS390X_OpS390XMOVBloadidx(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9505,9 +8864,7 @@ func rewriteValueS390X_OpS390XMOVBloadidx(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9619,16 +8976,10 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBload { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - 
v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg x:(MOVBloadidx _ _ _)) @@ -9636,16 +8987,10 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBloadidx { + if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg x:(MOVBZload [o] {s} p mem)) @@ -9666,12 +9011,10 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVBreg x:(MOVBZloadidx [o] {s} p i mem)) @@ -9693,13 +9036,10 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVBloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVBreg x:(Arg )) @@ -9714,9 +9054,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { if !(t.IsSigned() && t.Size() == 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg (MOVDconst [c])) @@ -9769,9 +9107,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) @@ -9788,9 +9124,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -9812,9 
+9146,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -9835,8 +9167,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -9859,9 +9190,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -9885,10 +9214,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) @@ -9914,10 +9240,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9945,9 +9268,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem)) @@ -9978,9 +9299,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, 
w0, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem)) @@ -10006,9 +9325,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem)) @@ -10039,9 +9356,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -10066,9 +9381,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem)) @@ -10098,9 +9411,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -10125,9 +9436,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem)) @@ -10157,9 +9466,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -10185,8 +9492,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v.reset(OpS390XMOVBstoreconst) 
v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -10208,8 +9514,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v.reset(OpS390XMOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) @@ -10234,8 +9539,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xff|ValAndOff(a).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -10266,10 +9570,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -10295,10 +9596,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -10331,10 +9629,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10373,10 +9668,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10410,10 +9702,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - 
v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10452,10 +9741,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10488,10 +9774,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10529,10 +9812,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10565,10 +9845,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10606,10 +9883,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10638,8 +9912,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) @@ -10660,8 +9933,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) @@ -10683,8 +9955,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) @@ -10706,8 +9977,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10725,15 +9995,12 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { if v_1.Op != OpS390XMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) @@ -10746,9 +10013,8 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { if v_1.Op != OpS390XFMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -10774,8 +10040,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.reset(OpS390XMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -10798,8 +10063,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.reset(OpS390XMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -10822,9 +10086,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off] {sym} (ADD ptr idx) mem) @@ -10849,9 +10111,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { 
v.reset(OpS390XMOVDloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10882,9 +10142,7 @@ func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10909,9 +10167,7 @@ func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10941,9 +10197,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -10964,8 +10218,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstoreconst) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -10989,9 +10242,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -11015,10 +10266,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -11044,10 +10292,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = off v.Aux 
= sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11075,10 +10320,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XSTMG2) v.AuxInt = i - 8 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(mem) + v.AddArg4(p, w0, w1, mem) return true } // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) @@ -11105,11 +10347,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XSTMG3) v.AuxInt = i - 16 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(mem) + v.AddArg5(p, w0, w1, w2, mem) return true } // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) @@ -11137,12 +10375,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XSTMG4) v.AuxInt = i - 24 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } return false @@ -11168,8 +10401,7 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { v.reset(OpS390XMOVDstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -11191,8 +10423,7 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { v.reset(OpS390XMOVDstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -11223,10 +10454,7 @@ func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11252,10 +10480,7 @@ func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11288,9 +10513,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem)) @@ -11320,9 +10543,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) @@ -11347,9 +10568,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem)) @@ -11379,9 +10598,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -11418,10 +10635,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -11459,10 +10673,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -11495,10 +10706,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - 
v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -11536,10 +10744,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -11560,9 +10765,8 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { if v_1.Op != OpS390XMOVHstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -11588,8 +10792,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -11612,8 +10815,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -11636,9 +10838,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHZload [off] {sym} (ADD ptr idx) mem) @@ -11663,9 +10863,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -11696,9 +10894,7 @@ func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -11723,9 +10919,7 @@ func 
rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -11821,16 +11015,10 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) @@ -11838,16 +11026,10 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZload _ _)) @@ -11855,16 +11037,10 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZload { + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) @@ -11872,16 +11048,10 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZloadidx { + if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHload [o] {s} p mem)) @@ -11902,12 +11072,10 @@ func rewriteValueS390X_OpS390XMOVHZreg(v 
*Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVHZreg x:(MOVHloadidx [o] {s} p i mem)) @@ -11929,13 +11097,10 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVHZreg x:(Arg )) @@ -11950,9 +11115,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { if !(!t.IsSigned() && t.Size() <= 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg (MOVDconst [c])) @@ -11996,9 +11159,8 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { if v_1.Op != OpS390XMOVHstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -12024,8 +11186,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -12048,8 +11209,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -12072,9 +11232,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off] {sym} (ADD ptr idx) mem) 
@@ -12099,9 +11257,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12132,9 +11288,7 @@ func rewriteValueS390X_OpS390XMOVHloadidx(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12159,9 +11313,7 @@ func rewriteValueS390X_OpS390XMOVHloadidx(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12257,16 +11409,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBload { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVBloadidx _ _ _)) @@ -12274,16 +11420,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBloadidx { + if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHload _ _)) @@ -12291,16 +11431,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHload { + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHloadidx _ _ _)) @@ -12308,16 +11442,10 @@ 
func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHloadidx { + if x.Op != OpS390XMOVHloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVBZload _ _)) @@ -12325,16 +11453,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVBZloadidx _ _ _)) @@ -12342,16 +11464,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHZload [o] {s} p mem)) @@ -12372,12 +11488,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVHreg x:(MOVHZloadidx [o] {s} p i mem)) @@ -12399,13 +11513,10 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVHreg x:(Arg )) @@ -12420,9 +11531,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) 
bool { if !(t.IsSigned() && t.Size() <= 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg (MOVDconst [c])) @@ -12475,9 +11584,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) @@ -12494,9 +11601,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -12518,9 +11623,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -12541,8 +11644,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -12566,9 +11668,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -12592,10 +11692,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) @@ -12621,10 
+11718,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12652,9 +11746,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem)) @@ -12685,9 +11777,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem)) @@ -12713,9 +11803,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) @@ -12746,9 +11834,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -12776,8 +11862,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -12799,8 +11884,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstoreconst [c] {s} p 
x:(MOVHstoreconst [a] {s} p mem)) @@ -12825,11 +11909,9 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16)) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -12860,10 +11942,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12889,10 +11968,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12925,10 +12001,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12967,10 +12040,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -13004,10 +12074,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -13046,10 +12113,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -13083,9 +12147,7 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { 
v.reset(OpS390XMOVDBRstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem)) @@ -13115,9 +12177,7 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { v.reset(OpS390XMOVDBRstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -13154,10 +12214,7 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { v.reset(OpS390XMOVDBRstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -13195,10 +12252,7 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { v.reset(OpS390XMOVDBRstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -13219,9 +12273,8 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { if v_1.Op != OpS390XMOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -13247,8 +12300,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -13271,8 +12323,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -13295,9 +12346,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) 
- v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWZload [off] {sym} (ADD ptr idx) mem) @@ -13322,9 +12371,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13355,9 +12402,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13382,9 +12427,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13463,16 +12506,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) @@ -13480,16 +12517,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZload _ _)) @@ -13497,16 +12528,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZload { + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { - 
break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) @@ -13514,16 +12539,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZloadidx { + if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZload _ _)) @@ -13531,16 +12550,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWZload { + if x.Op != OpS390XMOVWZload || !(!x.Type.IsSigned() || x.Type.Size() > 4) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 4) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) @@ -13548,16 +12561,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWZloadidx { + if x.Op != OpS390XMOVWZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 4) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 4) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWload [o] {s} p mem)) @@ -13578,12 +12585,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVWZreg x:(MOVWloadidx [o] {s} p i mem)) @@ -13605,13 +12610,10 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - 
v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVWZreg x:(Arg )) @@ -13626,9 +12628,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!t.IsSigned() && t.Size() <= 4) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg (MOVDconst [c])) @@ -13657,9 +12657,8 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { if v_1.Op != OpS390XMOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -13685,8 +12684,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -13709,8 +12707,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -13733,9 +12730,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADD ptr idx) mem) @@ -13760,9 +12755,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13793,9 +12786,7 @@ func rewriteValueS390X_OpS390XMOVWloadidx(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13820,9 +12811,7 @@ func 
rewriteValueS390X_OpS390XMOVWloadidx(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13901,16 +12890,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBload { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVBloadidx _ _ _)) @@ -13918,16 +12901,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBloadidx { + if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHload _ _)) @@ -13935,16 +12912,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHload { + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHloadidx _ _ _)) @@ -13952,16 +12923,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHloadidx { + if x.Op != OpS390XMOVHloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWload _ _)) @@ -13969,16 +12934,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { 
x := v_0 - if x.Op != OpS390XMOVWload { + if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWloadidx _ _ _)) @@ -13986,16 +12945,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWloadidx { + if x.Op != OpS390XMOVWloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVBZload _ _)) @@ -14003,16 +12956,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVBZloadidx _ _ _)) @@ -14020,16 +12967,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHZload _ _)) @@ -14037,16 +12978,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZload { + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: 
(MOVWreg x:(MOVHZloadidx _ _ _)) @@ -14054,16 +12989,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZloadidx { + if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWZload [o] {s} p mem)) @@ -14084,12 +13013,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVWreg x:(MOVWZloadidx [o] {s} p i mem)) @@ -14111,13 +13038,10 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVWloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVWreg x:(Arg )) @@ -14132,9 +13056,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(t.IsSigned() && t.Size() <= 4) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg (MOVDconst [c])) @@ -14168,9 +13090,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) @@ -14187,9 +13107,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -14211,9 +13129,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v 
*Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -14234,8 +13150,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -14259,9 +13174,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -14285,10 +13198,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) @@ -14314,10 +13224,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -14344,9 +13251,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem)) @@ -14377,9 +13282,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: 
(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) @@ -14405,10 +13308,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XSTM2) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(mem) + v.AddArg4(p, w0, w1, mem) return true } // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) @@ -14435,11 +13335,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XSTM3) v.AuxInt = i - 8 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(mem) + v.AddArg5(p, w0, w1, w2, mem) return true } // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) @@ -14467,12 +13363,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XSTM4) v.AuxInt = i - 12 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } return false @@ -14500,8 +13391,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { v.reset(OpS390XMOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -14523,8 +13413,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { v.reset(OpS390XMOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) @@ -14549,11 +13438,9 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -14584,10 +13471,7 @@ func 
rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -14613,10 +13497,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -14649,10 +13530,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -14691,10 +13569,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -14747,9 +13622,7 @@ func rewriteValueS390X_OpS390XMULLD(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -14787,9 +13660,7 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLDconst [c] x) @@ -14819,8 +13690,7 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLDconst [c] x) @@ -14836,8 +13706,7 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLDconst [c] (MOVDconst [d])) @@ -14871,17 +13740,15 @@ func rewriteValueS390X_OpS390XMULLDload(v 
*Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XMULLD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -14903,9 +13770,7 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { v.reset(OpS390XMULLDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -14928,9 +13793,7 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { v.reset(OpS390XMULLDload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -14976,9 +13839,7 @@ func rewriteValueS390X_OpS390XMULLW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -15005,9 +13866,7 @@ func rewriteValueS390X_OpS390XMULLW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -15045,9 +13904,7 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLWconst [c] x) @@ -15077,8 +13934,7 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLWconst [c] x) @@ -15094,8 +13950,7 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v0.AuxInt = log2(c - 1) 
v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLWconst [c] (MOVDconst [d])) @@ -15135,9 +13990,7 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool { v.reset(OpS390XMULLWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -15160,9 +14013,7 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool { v.reset(OpS390XMULLWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -15229,8 +14080,7 @@ func rewriteValueS390X_OpS390XNOT(v *Value) bool { v.reset(OpS390XXOR) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = -1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -15340,8 +14190,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { y := v_1_0.Args[0] v.reset(OpS390XLGDR) v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15375,8 +14224,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15408,8 +14256,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { y := v_1_0.Args[0] v.reset(OpS390XLGDR) v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15443,8 +14290,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15476,9 +14322,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - 
v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) @@ -15503,9 +14347,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -15541,12 +14383,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -15582,12 +14422,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -15623,12 +14461,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -15682,18 +14518,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -15748,18 +14581,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := 
b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -15803,13 +14633,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -15854,13 +14681,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -15905,13 +14729,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -15974,19 +14795,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16050,19 +14867,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, 
OpS390XMOVWZloadidx, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16101,13 +14914,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -16152,13 +14963,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -16203,12 +15012,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -16262,20 +15069,17 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16338,20 +15142,17 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - 
v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16395,14 +15196,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -16456,14 +15254,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -16517,13 +15312,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -16586,21 +15378,17 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - 
v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16672,21 +15460,17 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16748,9 +15532,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) @@ -16775,9 +15557,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -16804,9 +15584,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -16842,12 +15620,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -16883,12 +15659,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -16942,18 +15716,15 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 
:= b.NewValue0(x1.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16997,13 +15768,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -17048,13 +15816,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -17117,19 +15882,15 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17168,13 +15929,11 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -17219,12 +15978,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = 
mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -17278,20 +16035,17 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17335,14 +16089,11 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -17396,13 +16147,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -17465,21 +16213,17 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) 
v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17500,9 +16244,7 @@ func rewriteValueS390X_OpS390XORWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORWconst [c] _) @@ -17554,9 +16296,7 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool { v.reset(OpS390XORWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -17579,9 +16319,7 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool { v.reset(OpS390XORWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -17595,9 +16333,7 @@ func rewriteValueS390X_OpS390XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -17641,17 +16377,15 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XOR) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -17673,9 +16407,7 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { v.reset(OpS390XORload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -17698,9 +16430,7 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { v.reset(OpS390XORload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) 
return true } return false @@ -17776,11 +16506,10 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -17799,8 +16528,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVWreg y)) @@ -17812,8 +16540,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVHreg y)) @@ -17825,8 +16552,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVBreg y)) @@ -17838,8 +16564,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVWZreg y)) @@ -17851,8 +16576,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVHZreg y)) @@ -17864,8 +16588,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVBZreg y)) @@ -17877,8 +16600,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -17918,11 +16640,10 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ 
-17941,8 +16662,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVWreg y)) @@ -17954,8 +16674,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVHreg y)) @@ -17967,8 +16686,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVBreg y)) @@ -17980,8 +16698,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVWZreg y)) @@ -17993,8 +16710,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVHZreg y)) @@ -18006,8 +16722,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVBZreg y)) @@ -18019,8 +16734,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18060,11 +16774,10 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18083,8 +16796,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVWreg y)) @@ -18096,8 +16808,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVHreg y)) @@ -18109,8 +16820,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVBreg y)) @@ -18122,8 +16832,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVWZreg y)) @@ -18135,8 +16844,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVHZreg y)) @@ -18148,8 +16856,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVBZreg y)) @@ -18161,8 +16868,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18218,11 +16924,10 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18241,8 +16946,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVWreg y)) @@ -18254,8 +16958,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVHreg y)) @@ -18267,8 +16970,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVBreg 
y)) @@ -18280,8 +16982,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVWZreg y)) @@ -18293,8 +16994,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVHZreg y)) @@ -18306,8 +17006,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVBZreg y)) @@ -18319,8 +17018,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18376,11 +17074,10 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18399,8 +17096,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVWreg y)) @@ -18412,8 +17108,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVHreg y)) @@ -18425,8 +17120,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVBreg y)) @@ -18438,8 +17132,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVWZreg y)) @@ -18451,8 +17144,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] 
v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVHZreg y)) @@ -18464,8 +17156,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVBZreg y)) @@ -18477,8 +17168,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18542,11 +17232,10 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18565,8 +17254,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVWreg y)) @@ -18578,8 +17266,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVHreg y)) @@ -18591,8 +17278,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVBreg y)) @@ -18604,8 +17290,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVWZreg y)) @@ -18617,8 +17302,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVHZreg y)) @@ -18630,8 +17314,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVBZreg y)) 
@@ -18643,8 +17326,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18679,12 +17361,7 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool { v.reset(OpS390XSTM4) v.AuxInt = i - 8 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } // match: (STM2 [i] {s} p (SRDconst [32] x) x mem) @@ -18704,9 +17381,7 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(p, x, mem) return true } return false @@ -18741,12 +17416,7 @@ func rewriteValueS390X_OpS390XSTMG2(v *Value) bool { v.reset(OpS390XSTMG4) v.AuxInt = i - 16 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } return false @@ -18823,9 +17493,7 @@ func rewriteValueS390X_OpS390XSUB(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -18843,8 +17511,7 @@ func rewriteValueS390X_OpS390XSUBE(v *Value) bool { break } v.reset(OpS390XSUBC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBE x y (FlagOV)) @@ -18856,8 +17523,7 @@ func rewriteValueS390X_OpS390XSUBE(v *Value) bool { break } v.reset(OpS390XSUBC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c)))))) @@ -18899,9 +17565,7 @@ func rewriteValueS390X_OpS390XSUBE(v *Value) bool { break } v.reset(OpS390XSUBE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.AddArg3(x, y, c) return true } return false @@ -18970,9 +17634,7 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - 
v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBW x g:(MOVWZload [off] {sym} ptr mem)) @@ -18996,9 +17658,7 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19014,9 +17674,7 @@ func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBWconst [c] x) @@ -19053,9 +17711,7 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool { v.reset(OpS390XSUBWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19078,9 +17734,7 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool { v.reset(OpS390XSUBWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19094,9 +17748,7 @@ func rewriteValueS390X_OpS390XSUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBconst [c] x) @@ -19162,17 +17814,15 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XSUB) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -19194,9 +17844,7 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { v.reset(OpS390XSUBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true 
} // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19219,9 +17867,7 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { v.reset(OpS390XSUBload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19238,8 +17884,7 @@ func rewriteValueS390X_OpS390XSumBytes2(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8) v0.AuxInt = 8 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -19256,8 +17901,7 @@ func rewriteValueS390X_OpS390XSumBytes4(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16) v1.AuxInt = 16 v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -19275,8 +17919,7 @@ func rewriteValueS390X_OpS390XSumBytes8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32) v1.AuxInt = 32 v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -19379,9 +18022,7 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -19464,9 +18105,7 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -19493,9 +18132,7 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -19513,9 +18150,7 @@ func rewriteValueS390X_OpS390XXORWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORWconst [c] (MOVDconst [d])) @@ -19555,9 +18190,7 @@ func rewriteValueS390X_OpS390XXORWload(v *Value) bool { v.reset(OpS390XXORWload) v.AuxInt = off1 + off2 v.Aux = sym 
- v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19580,9 +18213,7 @@ func rewriteValueS390X_OpS390XXORWload(v *Value) bool { v.reset(OpS390XXORWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19596,9 +18227,7 @@ func rewriteValueS390X_OpS390XXORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [c] (MOVDconst [d])) @@ -19632,17 +18261,15 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XXOR) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -19664,9 +18291,7 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { v.reset(OpS390XXORload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19689,9 +18314,7 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { v.reset(OpS390XXORload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19712,14 +18335,12 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AuxInt = -1 
v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -19735,16 +18356,13 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags)) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) - v2.AddArg(c) + v2.AddArg2(v3, c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -19758,10 +18376,9 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpS390XADDW) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } // match: (Select0 (AddTupleFirst64 val tuple)) @@ -19774,10 +18391,9 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpS390XADD) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } // match: (Select0 (ADDCconst (MOVDconst [c]) [d])) @@ -19837,22 +18453,18 @@ func rewriteValueS390X_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v4.AddArg(x) - v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v6 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags)) v6.AuxInt = -1 v6.AddArg(c) v5.AddArg(v6) - v4.AddArg(v5) + v4.AddArg3(x, y, v5) 
v3.AddArg(v4) - v0.AddArg(v3) + v0.AddArg3(v1, v2, v3) v.AddArg(v0) return true } @@ -19870,24 +18482,19 @@ func rewriteValueS390X_OpSelect1(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 0 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v5 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v5.AddArg(x) - v5.AddArg(y) v6 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v7 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags)) v8 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v8.AuxInt = 0 - v7.AddArg(v8) - v7.AddArg(c) + v7.AddArg2(v8, c) v6.AddArg(v7) - v5.AddArg(v6) + v5.AddArg3(x, y, v6) v4.AddArg(v5) - v1.AddArg(v4) + v1.AddArg3(v2, v3, v4) v0.AddArg(v1) v.AddArg(v0) return true @@ -20034,9 +18641,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XFMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20051,9 +18656,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XFMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20068,9 +18671,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20085,9 +18686,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20102,9 +18701,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } 
v.reset(OpS390XMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20119,9 +18716,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -20149,9 +18744,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) @@ -20164,8 +18757,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -20178,8 +18770,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVHstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) @@ -20192,8 +18783,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVWstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) @@ -20206,8 +18796,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVDstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -20220,12 +18809,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) @@ -20238,12 +18825,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 
v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -20256,12 +18841,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -20274,12 +18857,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVWstoreconst) v.AuxInt = makeValAndOff(0, 3) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -20294,8 +18875,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { } v.reset(OpS390XCLEAR) v.AuxInt = makeValAndOff(s, 0) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [s] destptr mem) @@ -20310,12 +18890,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { } v.reset(OpS390XLoweredZero) v.AuxInt = s % 256 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XADDconst, destptr.Type) v0.AuxInt = (s / 256) * 256 v0.AddArg(destptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } return false @@ -20331,9 +18909,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCGRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCGRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -20344,9 +18920,7 @@ func rewriteBlockS390X(b *Block) 
bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -20357,9 +18931,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCLGRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCLGRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -20370,9 +18942,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCLRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCLRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -20387,8 +18957,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -20404,8 +18973,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -20421,8 +18989,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -20438,8 +19005,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -20455,8 +19021,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = 127 b.Aux = 
s390x.LessOrEqual return true @@ -20472,8 +19037,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = 127 b.Aux = s390x.LessOrEqual return true @@ -20489,8 +19053,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.LessOrEqual { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = -128 b.Aux = s390x.Less return true @@ -20506,8 +19069,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.LessOrEqual { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = -128 b.Aux = s390x.Less return true @@ -20523,8 +19085,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Greater { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = -128 b.Aux = s390x.GreaterOrEqual return true @@ -20540,8 +19101,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Greater { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = -128 b.Aux = s390x.GreaterOrEqual return true @@ -20557,8 +19117,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = 127 b.Aux = s390x.Greater return true @@ -20574,8 +19133,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = 127 b.Aux = s390x.Greater return true @@ -20591,8 +19149,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = -1 b.Aux = s390x.LessOrEqual return true @@ -20608,8 +19165,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != 
s390x.Less { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = -1 b.Aux = s390x.LessOrEqual return true @@ -20625,8 +19181,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = -1 b.Aux = s390x.Greater return true @@ -20642,8 +19197,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = -1 b.Aux = s390x.Greater return true @@ -20654,8 +19208,7 @@ func rewriteBlockS390X(b *Block) bool { v_0 := b.Controls[0] cmp := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XBRC) - b.AddControl(cmp) + b.resetWithControl(BlockS390XBRC, cmp) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -20851,8 +19404,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -20868,8 +19420,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(x)) { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCGIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -20885,11 +19436,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(y) && is32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -20904,11 +19454,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(x) && is32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = 
c.(s390x.CCMask).ReverseComparison() return true } @@ -20947,8 +19496,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = y b.Aux = c return true @@ -20960,8 +19508,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = y b.Aux = c return true @@ -21172,8 +19719,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -21189,8 +19735,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(x)) { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCLGIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -21206,11 +19751,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(y) && isU32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPUconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -21225,11 +19769,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(x) && isU32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPUconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -21280,8 +19823,7 @@ func rewriteBlockS390X(b *Block) bool { if b.AuxInt != 0 || b.Aux != s390x.LessOrGreater || !(int32(x) != 0) { break } - b.Reset(BlockS390XBRC) - b.AddControl(cmp) + b.resetWithControl(BlockS390XBRC, cmp) b.Aux = d return true } @@ -21292,8 +19834,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := 
b.AuxInt c := b.Aux - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = y b.Aux = c return true @@ -21305,8 +19846,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = y b.Aux = c return true @@ -21429,8 +19969,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -21446,8 +19985,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(x)) { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCLIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -21463,11 +20001,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(y) && isU32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPWUconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -21482,11 +20019,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(x) && isU32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPWUconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -21529,8 +20065,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -21546,8 +20081,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(x)) { break } - b.Reset(BlockS390XCIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ 
-21563,11 +20097,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(y) && is32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPWconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -21582,11 +20115,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(x) && is32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPWconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -21622,10 +20154,9 @@ func rewriteBlockS390X(b *Block) bool { // result: (CLIJ {s390x.LessOrGreater} (MOVBZreg cond) [0] yes no) for { cond := b.Controls[0] - b.Reset(BlockS390XCLIJ) v0 := b.NewValue0(cond.Pos, OpS390XMOVBZreg, typ.Bool) v0.AddArg(cond) - b.AddControl(v0) + b.resetWithControl(BlockS390XCLIJ, v0) b.AuxInt = 0 b.Aux = s390x.LessOrGreater return true diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index ea365f46b6..be1b51e7aa 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -3,6 +3,7 @@ package ssa +import "math" import "cmd/internal/objabi" import "cmd/compile/internal/types" @@ -169,6 +170,9 @@ func rewriteValueWasm(v *Value) bool { case OpCvt64to64F: v.Op = OpWasmF64ConvertI64S return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueWasm_OpDiv16(v) case OpDiv16u: @@ -682,10 +686,9 @@ func rewriteValueWasm_OpBitLen64(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Clz, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -698,10 +701,9 @@ func rewriteValueWasm_OpCom16(v *Value) bool { for { x := v_0 
v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -714,10 +716,9 @@ func rewriteValueWasm_OpCom32(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -730,10 +731,9 @@ func rewriteValueWasm_OpCom64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -746,10 +746,9 @@ func rewriteValueWasm_OpCom8(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -772,10 +771,9 @@ func rewriteValueWasm_OpCtz16(v *Value) bool { x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0x10000 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -790,10 +788,9 @@ func rewriteValueWasm_OpCtz32(v *Value) bool { x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0x100000000 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -808,10 +805,9 @@ func rewriteValueWasm_OpCtz8(v *Value) bool { x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0x100 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -889,10 +885,9 @@ func rewriteValueWasm_OpDiv16(v *Value) bool { v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - 
v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -909,10 +904,9 @@ func rewriteValueWasm_OpDiv16u(v *Value) bool { v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -929,10 +923,9 @@ func rewriteValueWasm_OpDiv32(v *Value) bool { v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -949,10 +942,9 @@ func rewriteValueWasm_OpDiv32u(v *Value) bool { v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -969,10 +961,9 @@ func rewriteValueWasm_OpDiv8(v *Value) bool { v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -989,10 +980,9 @@ func rewriteValueWasm_OpDiv8u(v *Value) bool { v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1009,10 +999,9 @@ func rewriteValueWasm_OpEq16(v *Value) bool { v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1029,10 +1018,9 @@ func rewriteValueWasm_OpEq32(v *Value) bool { v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1049,10 +1037,9 @@ func rewriteValueWasm_OpEq8(v *Value) bool { v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1084,10 +1071,9 @@ func rewriteValueWasm_OpLeq16(v *Value) bool { v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1104,10 +1090,9 @@ func rewriteValueWasm_OpLeq16U(v *Value) bool { v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1124,10 +1109,9 @@ func rewriteValueWasm_OpLeq32(v *Value) bool { v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1144,10 +1128,9 @@ func rewriteValueWasm_OpLeq32U(v *Value) bool { v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1164,10 +1147,9 @@ func rewriteValueWasm_OpLeq8(v *Value) bool { v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1184,10 +1166,9 @@ func rewriteValueWasm_OpLeq8U(v *Value) bool { v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1204,10 +1185,9 @@ func rewriteValueWasm_OpLess16(v *Value) bool { v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1224,10 +1204,9 @@ func rewriteValueWasm_OpLess16U(v *Value) bool { v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1244,10 +1223,9 @@ func rewriteValueWasm_OpLess32(v *Value) bool { v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1264,10 +1242,9 @@ func rewriteValueWasm_OpLess32U(v *Value) bool { v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1284,10 +1261,9 @@ func rewriteValueWasm_OpLess8(v *Value) bool { v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1304,10 +1280,9 @@ func rewriteValueWasm_OpLess8U(v *Value) bool { v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1325,8 +1300,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmF32Load) - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1340,8 +1314,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmF64Load) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1355,8 +1328,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1370,8 +1342,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load32U) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1385,8 +1356,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load32S) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1400,8 +1370,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load16U) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1415,8 +1384,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load16S) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1430,8 +1398,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load8U) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1445,8 +1412,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load8S) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1477,10 +1443,9 @@ func rewriteValueWasm_OpLsh16x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1497,10 +1462,9 @@ func rewriteValueWasm_OpLsh16x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, 
OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1517,10 +1481,9 @@ func rewriteValueWasm_OpLsh16x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1537,10 +1500,9 @@ func rewriteValueWasm_OpLsh32x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1557,10 +1519,9 @@ func rewriteValueWasm_OpLsh32x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1577,10 +1538,9 @@ func rewriteValueWasm_OpLsh32x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1597,10 +1557,9 @@ func rewriteValueWasm_OpLsh64x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1617,10 +1576,9 @@ func rewriteValueWasm_OpLsh64x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1639,8 +1597,7 @@ func rewriteValueWasm_OpLsh64x64(v *Value) bool { break } v.reset(OpWasmI64Shl) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x64 x (I64Const [c])) @@ -1656,10 +1613,9 @@ func rewriteValueWasm_OpLsh64x64(v *Value) bool { break } v.reset(OpWasmI64Shl) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = c - v.AddArg(v0) + v.AddArg2(x, v0) return true } // 
match: (Lsh64x64 x (I64Const [c])) @@ -1684,18 +1640,14 @@ func rewriteValueWasm_OpLsh64x64(v *Value) bool { y := v_1 v.reset(OpWasmSelect) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = 64 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(y, v3) + v.AddArg3(v0, v1, v2) return true } } @@ -1712,10 +1664,9 @@ func rewriteValueWasm_OpLsh64x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1732,10 +1683,9 @@ func rewriteValueWasm_OpLsh8x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1752,10 +1702,9 @@ func rewriteValueWasm_OpLsh8x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1772,10 +1721,9 @@ func rewriteValueWasm_OpLsh8x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1792,10 +1740,9 @@ func rewriteValueWasm_OpMod16(v *Value) bool { v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1812,10 +1759,9 @@ func rewriteValueWasm_OpMod16u(v *Value) bool { v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - 
v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1832,10 +1778,9 @@ func rewriteValueWasm_OpMod32(v *Value) bool { v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1852,10 +1797,9 @@ func rewriteValueWasm_OpMod32u(v *Value) bool { v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1872,10 +1816,9 @@ func rewriteValueWasm_OpMod8(v *Value) bool { v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1892,10 +1835,9 @@ func rewriteValueWasm_OpMod8u(v *Value) bool { v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1912,9 +1854,7 @@ func rewriteValueWasm_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -1927,12 +1867,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store8) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -1945,12 +1882,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store16) - v.AddArg(dst) v0 := 
b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -1963,12 +1897,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store32) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -1981,12 +1912,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -2000,20 +1928,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -2027,20 +1949,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store8) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + 
v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -2054,20 +1970,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store8) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -2081,20 +1991,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store16) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -2108,20 +2012,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store32) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -2137,20 +2035,14 @@ func 
rewriteValueWasm_OpMove(v *Value) bool { } v.reset(OpWasmI64Store) v.AuxInt = s - 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -2169,19 +2061,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -2200,28 +2087,20 @@ func rewriteValueWasm_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v2.AuxInt = 8 - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) v3.AuxInt = 8 - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) + v3.AddArg2(src, mem) v4 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v4.AddArg(dst) v5 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v5.AddArg(src) - v5.AddArg(mem) - v4.AddArg(v5) - v4.AddArg(mem) - v2.AddArg(v4) - v.AddArg(v2) + v5.AddArg2(src, mem) + v4.AddArg3(dst, v5, mem) + v2.AddArg3(dst, v3, v4) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] 
dst src mem) @@ -2237,9 +2116,7 @@ func rewriteValueWasm_OpMove(v *Value) bool { } v.reset(OpWasmLoweredMove) v.AuxInt = s / 8 - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } return false @@ -2255,8 +2132,7 @@ func rewriteValueWasm_OpNeg16(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2271,8 +2147,7 @@ func rewriteValueWasm_OpNeg32(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2287,8 +2162,7 @@ func rewriteValueWasm_OpNeg64(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2303,8 +2177,7 @@ func rewriteValueWasm_OpNeg8(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2321,10 +2194,9 @@ func rewriteValueWasm_OpNeq16(v *Value) bool { v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2341,10 +2213,9 @@ func rewriteValueWasm_OpNeq32(v *Value) bool { v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2361,10 +2232,9 @@ func rewriteValueWasm_OpNeq8(v *Value) bool { v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2429,17 
+2299,14 @@ func rewriteValueWasm_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -2460,17 +2327,14 @@ func rewriteValueWasm_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -2490,10 +2354,9 @@ func rewriteValueWasm_OpRsh16Ux16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2512,10 +2375,9 @@ func rewriteValueWasm_OpRsh16Ux32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2534,8 +2396,7 @@ func rewriteValueWasm_OpRsh16Ux64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2554,10 +2415,9 @@ func rewriteValueWasm_OpRsh16Ux8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, 
OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2576,10 +2436,9 @@ func rewriteValueWasm_OpRsh16x16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2598,10 +2457,9 @@ func rewriteValueWasm_OpRsh16x32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2620,8 +2478,7 @@ func rewriteValueWasm_OpRsh16x64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2640,10 +2497,9 @@ func rewriteValueWasm_OpRsh16x8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2662,10 +2518,9 @@ func rewriteValueWasm_OpRsh32Ux16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2684,10 +2539,9 @@ func rewriteValueWasm_OpRsh32Ux32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2706,8 +2560,7 @@ func rewriteValueWasm_OpRsh32Ux64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2726,10 +2579,9 @@ func 
rewriteValueWasm_OpRsh32Ux8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2748,10 +2600,9 @@ func rewriteValueWasm_OpRsh32x16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2770,10 +2621,9 @@ func rewriteValueWasm_OpRsh32x32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2792,8 +2642,7 @@ func rewriteValueWasm_OpRsh32x64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2812,10 +2661,9 @@ func rewriteValueWasm_OpRsh32x8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2832,10 +2680,9 @@ func rewriteValueWasm_OpRsh64Ux16(v *Value) bool { y := v_1 v.reset(OpRsh64Ux64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2852,10 +2699,9 @@ func rewriteValueWasm_OpRsh64Ux32(v *Value) bool { y := v_1 v.reset(OpRsh64Ux64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2874,8 +2720,7 @@ func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { break } v.reset(OpWasmI64ShrU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: 
(Rsh64Ux64 x (I64Const [c])) @@ -2891,10 +2736,9 @@ func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { break } v.reset(OpWasmI64ShrU) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = c - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (I64Const [c])) @@ -2919,18 +2763,14 @@ func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { y := v_1 v.reset(OpWasmSelect) v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = 64 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(y, v3) + v.AddArg3(v0, v1, v2) return true } } @@ -2947,10 +2787,9 @@ func rewriteValueWasm_OpRsh64Ux8(v *Value) bool { y := v_1 v.reset(OpRsh64Ux64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2967,10 +2806,9 @@ func rewriteValueWasm_OpRsh64x16(v *Value) bool { y := v_1 v.reset(OpRsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2987,10 +2825,9 @@ func rewriteValueWasm_OpRsh64x32(v *Value) bool { y := v_1 v.reset(OpRsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -3009,8 +2846,7 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { break } v.reset(OpWasmI64ShrS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x64 x (I64Const [c])) @@ -3026,10 +2862,9 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { break } v.reset(OpWasmI64ShrS) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = c - v.AddArg(v0) + v.AddArg2(x, v0) return true } // 
match: (Rsh64x64 x (I64Const [c])) @@ -3045,10 +2880,9 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { break } v.reset(OpWasmI64ShrS) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 63 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x y) @@ -3057,19 +2891,15 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpWasmI64ShrS) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = 64 - v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v2.AddArg2(y, v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -3086,10 +2916,9 @@ func rewriteValueWasm_OpRsh64x8(v *Value) bool { y := v_1 v.reset(OpRsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -3108,10 +2937,9 @@ func rewriteValueWasm_OpRsh8Ux16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3130,10 +2958,9 @@ func rewriteValueWasm_OpRsh8Ux32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3152,8 +2979,7 @@ func rewriteValueWasm_OpRsh8Ux64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3172,10 +2998,9 @@ func rewriteValueWasm_OpRsh8Ux8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) 
v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3194,10 +3019,9 @@ func rewriteValueWasm_OpRsh8x16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3216,10 +3040,9 @@ func rewriteValueWasm_OpRsh8x32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3238,8 +3061,7 @@ func rewriteValueWasm_OpRsh8x64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3258,10 +3080,9 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3276,10 +3097,7 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { if x.Op != OpWasmI64Load16S { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt16to32 x) @@ -3300,14 +3118,12 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 48 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 48 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3322,10 +3138,7 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { if x.Op != OpWasmI64Load16S { break } - _ = x.Args[1] - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt16to64 x) @@ -3346,14 +3159,12 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 48 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 48 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3368,10 +3179,7 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { if x.Op != OpWasmI64Load32S { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt32to64 x) @@ -3392,14 +3200,12 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 32 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3414,10 +3220,7 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { if x.Op != OpWasmI64Load8S { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt8to16 x) @@ -3438,14 +3241,12 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 56 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 56 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3460,10 +3261,7 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { if x.Op != OpWasmI64Load8S { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + 
v.copyOf(x) return true } // match: (SignExt8to32 x) @@ -3484,14 +3282,12 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 56 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 56 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3506,10 +3302,7 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { if x.Op != OpWasmI64Load8S { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt8to64 x) @@ -3530,14 +3323,12 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 56 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 56 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3553,12 +3344,10 @@ func rewriteValueWasm_OpSlicemask(v *Value) bool { v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0 - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(v1, x) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 63 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3578,9 +3367,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmF64Store) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3595,9 +3382,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmF32Store) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3612,9 +3397,7 @@ func rewriteValueWasm_OpStore(v *Value) bool 
{ break } v.reset(OpWasmI64Store) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3629,9 +3412,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store32) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3646,9 +3427,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store16) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3663,9 +3442,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store8) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3691,6 +3468,7 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool { return true } // match: (F64Add (F64Const [x]) y) + // cond: y.Op != OpWasmF64Const // result: (F64Add y (F64Const [x])) for { if v_0.Op != OpWasmF64Const { @@ -3698,11 +3476,13 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmF64Const) { + break + } v.reset(OpWasmF64Add) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -3713,6 +3493,7 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (F64Mul (F64Const [x]) (F64Const [y])) + // cond: !math.IsNaN(auxTo64F(x) * auxTo64F(y)) // result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))]) for { if v_0.Op != OpWasmF64Const { @@ -3723,11 +3504,15 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { break } y := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(x) * auxTo64F(y))) { + break + } v.reset(OpWasmF64Const) v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y)) return true } // match: (F64Mul (F64Const [x]) y) + // cond: y.Op != OpWasmF64Const // result: 
(F64Mul y (F64Const [x])) for { if v_0.Op != OpWasmF64Const { @@ -3735,11 +3520,13 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmF64Const) { + break + } v.reset(OpWasmF64Mul) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -3765,6 +3552,7 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool { return true } // match: (I64Add (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Add y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -3772,11 +3560,13 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Add) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } // match: (I64Add x (I64Const [y])) @@ -3803,9 +3593,7 @@ func rewriteValueWasm_OpWasmI64AddConst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (I64AddConst [off] (LoweredAddr {sym} [off2] base)) @@ -3851,6 +3639,7 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool { return true } // match: (I64And (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64And y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -3858,11 +3647,13 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64And) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -3911,6 +3702,7 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool { return true } // match: (I64Eq (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Eq y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -3918,11 +3710,13 @@ func 
rewriteValueWasm_OpWasmI64Eq(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Eq) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } // match: (I64Eq x (I64Const [0])) @@ -3978,8 +3772,7 @@ func rewriteValueWasm_OpWasmI64Load(v *Value) bool { } v.reset(OpWasmI64Load) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4021,8 +3814,7 @@ func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool { } v.reset(OpWasmI64Load16S) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4048,8 +3840,7 @@ func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool { } v.reset(OpWasmI64Load16U) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4091,8 +3882,7 @@ func rewriteValueWasm_OpWasmI64Load32S(v *Value) bool { } v.reset(OpWasmI64Load32S) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4118,8 +3908,7 @@ func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool { } v.reset(OpWasmI64Load32U) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4161,8 +3950,7 @@ func rewriteValueWasm_OpWasmI64Load8S(v *Value) bool { } v.reset(OpWasmI64Load8S) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4186,8 +3974,7 @@ func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool { } v.reset(OpWasmI64Load8U) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4231,6 +4018,7 @@ func 
rewriteValueWasm_OpWasmI64Mul(v *Value) bool { return true } // match: (I64Mul (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Mul y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4238,11 +4026,13 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Mul) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -4291,6 +4081,7 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool { return true } // match: (I64Ne (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Ne y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4298,11 +4089,13 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Ne) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } // match: (I64Ne x (I64Const [0])) @@ -4341,6 +4134,7 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool { return true } // match: (I64Or (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Or y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4348,11 +4142,13 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Or) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -4438,9 +4234,7 @@ func rewriteValueWasm_OpWasmI64Store(v *Value) bool { } v.reset(OpWasmI64Store) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4466,9 +4260,7 @@ func rewriteValueWasm_OpWasmI64Store16(v *Value) bool { } v.reset(OpWasmI64Store16) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - 
v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4494,9 +4286,7 @@ func rewriteValueWasm_OpWasmI64Store32(v *Value) bool { } v.reset(OpWasmI64Store32) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4522,9 +4312,7 @@ func rewriteValueWasm_OpWasmI64Store8(v *Value) bool { } v.reset(OpWasmI64Store8) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4550,6 +4338,7 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool { return true } // match: (I64Xor (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Xor y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4557,11 +4346,13 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Xor) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -4578,9 +4369,7 @@ func rewriteValueWasm_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) @@ -4592,11 +4381,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [2] destptr mem) @@ -4608,11 +4395,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store16) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [4] destptr mem) @@ -4624,11 +4409,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 
v.reset(OpWasmI64Store32) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [8] destptr mem) @@ -4640,11 +4423,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [3] destptr mem) @@ -4657,17 +4438,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store8) v.AuxInt = 2 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [5] destptr mem) @@ -4680,17 +4457,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store8) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [6] destptr mem) @@ -4703,17 +4476,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store16) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + 
v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [7] destptr mem) @@ -4726,17 +4495,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store32) v.AuxInt = 3 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [s] destptr mem) @@ -4754,14 +4519,11 @@ func rewriteValueWasm_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 8 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [16] destptr mem) @@ -4774,17 +4536,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 8 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [24] destptr mem) @@ -4797,24 +4555,18 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 16 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) 
v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v3.AddArg(destptr) v4 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(destptr, v4, mem) + v1.AddArg3(destptr, v2, v3) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [32] destptr mem) @@ -4827,31 +4579,23 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 24 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v1.AuxInt = 16 - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v3.AuxInt = 8 - v3.AddArg(destptr) v4 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v5.AddArg(destptr) v6 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(destptr, v6, mem) + v3.AddArg3(destptr, v4, v5) + v1.AddArg3(destptr, v2, v3) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [s] destptr mem) @@ -4866,8 +4610,7 @@ func rewriteValueWasm_OpZero(v *Value) bool { } v.reset(OpWasmLoweredZero) v.AuxInt = s / 8 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } return false @@ -4883,10 +4626,7 @@ func rewriteValueWasm_OpZeroExt16to32(v *Value) bool { if x.Op != OpWasmI64Load16U { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt16to32 x) @@ -4894,10 +4634,9 @@ func rewriteValueWasm_OpZeroExt16to32(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xffff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ 
-4912,10 +4651,7 @@ func rewriteValueWasm_OpZeroExt16to64(v *Value) bool { if x.Op != OpWasmI64Load16U { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt16to64 x) @@ -4923,10 +4659,9 @@ func rewriteValueWasm_OpZeroExt16to64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xffff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -4941,10 +4676,7 @@ func rewriteValueWasm_OpZeroExt32to64(v *Value) bool { if x.Op != OpWasmI64Load32U { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt32to64 x) @@ -4952,10 +4684,9 @@ func rewriteValueWasm_OpZeroExt32to64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xffffffff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -4970,10 +4701,7 @@ func rewriteValueWasm_OpZeroExt8to16(v *Value) bool { if x.Op != OpWasmI64Load8U { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt8to16 x) @@ -4981,10 +4709,9 @@ func rewriteValueWasm_OpZeroExt8to16(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -4999,10 +4726,7 @@ func rewriteValueWasm_OpZeroExt8to32(v *Value) bool { if x.Op != OpWasmI64Load8U { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt8to32 x) @@ -5010,10 +4734,9 @@ func rewriteValueWasm_OpZeroExt8to32(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -5028,10 +4751,7 @@ func 
rewriteValueWasm_OpZeroExt8to64(v *Value) bool { if x.Op != OpWasmI64Load8U { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt8to64 x) @@ -5039,10 +4759,9 @@ func rewriteValueWasm_OpZeroExt8to64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index 9393cade62..1d7979f5c9 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -41,9 +41,7 @@ func rewriteValuedec_OpComplexImag(v *Value) bool { break } imag := v_0.Args[1] - v.reset(OpCopy) - v.Type = imag.Type - v.AddArg(imag) + v.copyOf(imag) return true } return false @@ -56,11 +54,8 @@ func rewriteValuedec_OpComplexReal(v *Value) bool { if v_0.Op != OpComplexMake { break } - _ = v_0.Args[1] real := v_0.Args[0] - v.reset(OpCopy) - v.Type = real.Type - v.AddArg(real) + v.copyOf(real) return true } return false @@ -74,9 +69,7 @@ func rewriteValuedec_OpIData(v *Value) bool { break } data := v_0.Args[1] - v.reset(OpCopy) - v.Type = data.Type - v.AddArg(data) + v.copyOf(data) return true } return false @@ -89,11 +82,8 @@ func rewriteValuedec_OpITab(v *Value) bool { if v_0.Op != OpIMake { break } - _ = v_0.Args[1] itab := v_0.Args[0] - v.reset(OpCopy) - v.Type = itab.Type - v.AddArg(itab) + v.copyOf(itab) return true } return false @@ -116,16 +106,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpComplexMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) v2.AuxInt = 4 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } 
// match: (Load ptr mem) @@ -140,16 +127,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpComplexMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) v2.AuxInt = 8 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -164,16 +148,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpStringMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Int) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v2.AuxInt = config.PtrSize v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -188,23 +169,18 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpSliceMake) v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo()) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Int) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v2.AuxInt = config.PtrSize v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) v3 := b.NewValue0(v.Pos, OpLoad, typ.Int) v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v4.AuxInt = 2 * config.PtrSize v4.AddArg(ptr) - v3.AddArg(v4) - v3.AddArg(mem) - v.AddArg(v3) + v3.AddArg2(v4, mem) + v.AddArg3(v0, v1, v3) return true } // match: (Load ptr mem) @@ -219,16 +195,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpIMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) v2.AuxInt = config.PtrSize v2.AddArg(ptr) - v1.AddArg(v2) 
- v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } return false @@ -242,9 +215,7 @@ func rewriteValuedec_OpSliceCap(v *Value) bool { break } cap := v_0.Args[2] - v.reset(OpCopy) - v.Type = cap.Type - v.AddArg(cap) + v.copyOf(cap) return true } return false @@ -257,11 +228,8 @@ func rewriteValuedec_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] len := v_0.Args[1] - v.reset(OpCopy) - v.Type = len.Type - v.AddArg(len) + v.copyOf(len) return true } return false @@ -274,11 +242,8 @@ func rewriteValuedec_OpSlicePtr(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] ptr := v_0.Args[0] - v.reset(OpCopy) - v.Type = ptr.Type - v.AddArg(ptr) + v.copyOf(ptr) return true } return false @@ -310,14 +275,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) v0.AuxInt = 4 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(imag) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Float32 - v1.AddArg(dst) - v1.AddArg(real) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, real, mem) + v.AddArg3(v0, imag, v1) return true } // match: (Store {t} dst (ComplexMake real imag) mem) @@ -340,14 +301,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) v0.AuxInt = 8 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(imag) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Float64 - v1.AddArg(dst) - v1.AddArg(real) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, real, mem) + v.AddArg3(v0, imag, v1) return true } // match: (Store dst (StringMake ptr len) mem) @@ -365,14 +322,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v0.AuxInt = config.PtrSize v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(len) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.BytePtr - v1.AddArg(dst) - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, ptr, mem) + 
v.AddArg3(v0, len, v1) return true } // match: (Store dst (SliceMake ptr len cap) mem) @@ -391,22 +344,16 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v0.AuxInt = 2 * config.PtrSize v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(cap) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Int v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v2.AuxInt = config.PtrSize v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(len) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = typ.BytePtr - v3.AddArg(dst) - v3.AddArg(ptr) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(dst, ptr, mem) + v1.AddArg3(v2, len, v3) + v.AddArg3(v0, cap, v1) return true } // match: (Store dst (IMake itab data) mem) @@ -424,14 +371,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) v0.AuxInt = config.PtrSize v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(data) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Uintptr - v1.AddArg(dst) - v1.AddArg(itab) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, itab, mem) + v.AddArg3(v0, data, v1) return true } return false @@ -445,9 +388,7 @@ func rewriteValuedec_OpStringLen(v *Value) bool { break } len := v_0.Args[1] - v.reset(OpCopy) - v.Type = len.Type - v.AddArg(len) + v.copyOf(len) return true } return false @@ -460,11 +401,8 @@ func rewriteValuedec_OpStringPtr(v *Value) bool { if v_0.Op != OpStringMake { break } - _ = v_0.Args[1] ptr := v_0.Args[0] - v.reset(OpCopy) - v.Type = ptr.Type - v.AddArg(ptr) + v.copyOf(ptr) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 96a23afd8d..b7048f111c 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -133,31 +133,26 @@ func rewriteValuedec64_OpAdd64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32) v1 := b.NewValue0(v.Pos, 
OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) + v4.AddArg2(v5, v6) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) v8 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(x) - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v10.AddArg(y) - v8.AddArg(v10) + v8.AddArg2(v9, v10) v7.AddArg(v8) - v.AddArg(v7) + v.AddArg2(v0, v7) return true } } @@ -175,19 +170,16 @@ func rewriteValuedec64_OpAnd64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -208,11 +200,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Int32) v0.AuxInt = off + 4 v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -228,11 +219,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v0.AuxInt = off + 4 v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off v1.Aux = n - 
v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -248,11 +238,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Int32) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off + 4 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -268,11 +257,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off + 4 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } return false @@ -291,19 +279,17 @@ func rewriteValuedec64_OpBitLen64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int) v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(x) v5.AddArg(v6) - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -320,12 +306,11 @@ func rewriteValuedec64_OpBswap64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v3.AddArg(x) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -342,12 +327,11 @@ func rewriteValuedec64_OpCom64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v3.AddArg(x) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -366,10 +350,9 @@ func rewriteValuedec64_OpConst64(v *Value) bool { v.reset(OpInt64Make) v0 := 
b.NewValue0(v.Pos, OpConst32, typ.Int32) v0.AuxInt = c >> 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(c)) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Const64 [c]) @@ -384,10 +367,9 @@ func rewriteValuedec64_OpConst64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v0.AuxInt = c >> 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(c)) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } return false @@ -406,7 +388,6 @@ func rewriteValuedec64_OpCtz64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) @@ -414,13 +395,12 @@ func rewriteValuedec64_OpCtz64(v *Value) bool { v5.AddArg(x) v4.AddArg(v5) v3.AddArg(v4) - v2.AddArg(v3) v6 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32) v7 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v7.AddArg(x) v6.AddArg(v7) - v2.AddArg(v6) - v.AddArg(v2) + v2.AddArg2(v3, v6) + v.AddArg2(v0, v2) return true } } @@ -438,19 +418,16 @@ func rewriteValuedec64_OpEq64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -462,11 +439,8 @@ func rewriteValuedec64_OpInt64Hi(v *Value) bool { if v_0.Op != OpInt64Make { break } - _ = v_0.Args[1] hi := v_0.Args[0] - v.reset(OpCopy) - v.Type = hi.Type - v.AddArg(hi) + v.copyOf(hi) return true } return false @@ -480,9 +454,7 @@ 
func rewriteValuedec64_OpInt64Lo(v *Value) bool { break } lo := v_0.Args[1] - v.reset(OpCopy) - v.Type = lo.Type - v.AddArg(lo) + v.copyOf(lo) return true } return false @@ -501,29 +473,24 @@ func rewriteValuedec64_OpLeq64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -541,29 +508,24 @@ func rewriteValuedec64_OpLeq64U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -581,29 +543,24 @@ func 
rewriteValuedec64_OpLess64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -621,29 +578,24 @@ func rewriteValuedec64_OpLess64U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -668,13 +620,10 @@ func rewriteValuedec64_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr) v1.AuxInt = 4 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := 
b.NewValue0(v.Pos, OpLoad, typ.UInt32) - v2.AddArg(ptr) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(ptr, mem) + v.AddArg2(v0, v2) return true } // match: (Load ptr mem) @@ -692,13 +641,10 @@ func rewriteValuedec64_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) v1.AuxInt = 4 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) - v2.AddArg(ptr) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(ptr, mem) + v.AddArg2(v0, v2) return true } // match: (Load ptr mem) @@ -713,16 +659,13 @@ func rewriteValuedec64_OpLoad(v *Value) bool { } v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) v2.AuxInt = 4 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -737,16 +680,13 @@ func rewriteValuedec64_OpLoad(v *Value) bool { } v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) v2.AuxInt = 4 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } return false @@ -763,7 +703,6 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -791,8 +730,7 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool { } v.reset(OpLsh16x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh16x64 x (Int64Make hi lo)) @@ -809,13 +747,11 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool { break } v.reset(OpLsh16x32) - v.AddArg(x) v0 := 
b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -832,7 +768,6 @@ func rewriteValuedec64_OpLsh32x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -860,8 +795,7 @@ func rewriteValuedec64_OpLsh32x64(v *Value) bool { } v.reset(OpLsh32x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh32x64 x (Int64Make hi lo)) @@ -878,13 +812,11 @@ func rewriteValuedec64_OpLsh32x64(v *Value) bool { break } v.reset(OpLsh32x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -907,33 +839,24 @@ func rewriteValuedec64_OpLsh64x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v2.AddArg(hi) - v2.AddArg(s) - v1.AddArg(v2) + v2.AddArg2(hi, s) v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v3.AddArg(lo) v4 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v5.AuxInt = 32 - v4.AddArg(v5) - v4.AddArg(s) - v3.AddArg(v4) - v1.AddArg(v3) - v0.AddArg(v1) + v4.AddArg2(v5, s) + v3.AddArg2(lo, v4) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v6.AddArg(lo) v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) - v7.AddArg(s) v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v8.AuxInt = 32 - v7.AddArg(v8) - v6.AddArg(v7) - v0.AddArg(v6) - v.AddArg(v0) + v7.AddArg2(s, v8) + v6.AddArg2(lo, v7) + v0.AddArg2(v1, v6) v9 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v9.AddArg(lo) - v9.AddArg(s) - v.AddArg(v9) + v9.AddArg2(lo, s) + v.AddArg2(v0, v9) return true } 
return false @@ -956,33 +879,24 @@ func rewriteValuedec64_OpLsh64x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v2.AddArg(hi) - v2.AddArg(s) - v1.AddArg(v2) + v2.AddArg2(hi, s) v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v3.AddArg(lo) v4 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v5.AuxInt = 32 - v4.AddArg(v5) - v4.AddArg(s) - v3.AddArg(v4) - v1.AddArg(v3) - v0.AddArg(v1) + v4.AddArg2(v5, s) + v3.AddArg2(lo, v4) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v6.AddArg(lo) v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) - v7.AddArg(s) v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v8.AuxInt = 32 - v7.AddArg(v8) - v6.AddArg(v7) - v0.AddArg(v6) - v.AddArg(v0) + v7.AddArg2(s, v8) + v6.AddArg2(lo, v7) + v0.AddArg2(v1, v6) v9 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v9.AddArg(lo) - v9.AddArg(s) - v.AddArg(v9) + v9.AddArg2(lo, s) + v.AddArg2(v0, v9) return true } return false @@ -999,7 +913,6 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1027,8 +940,7 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool { } v.reset(OpLsh64x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh64x64 x (Int64Make hi lo)) @@ -1045,13 +957,11 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool { break } v.reset(OpLsh64x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1074,33 +984,24 @@ func rewriteValuedec64_OpLsh64x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, 
OpLsh32x8, typ.UInt32) - v2.AddArg(hi) - v2.AddArg(s) - v1.AddArg(v2) + v2.AddArg2(hi, s) v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v3.AddArg(lo) v4 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v5.AuxInt = 32 - v4.AddArg(v5) - v4.AddArg(s) - v3.AddArg(v4) - v1.AddArg(v3) - v0.AddArg(v1) + v4.AddArg2(v5, s) + v3.AddArg2(lo, v4) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v6.AddArg(lo) v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) - v7.AddArg(s) v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v8.AuxInt = 32 - v7.AddArg(v8) - v6.AddArg(v7) - v0.AddArg(v6) - v.AddArg(v0) + v7.AddArg2(s, v8) + v6.AddArg2(lo, v7) + v0.AddArg2(v1, v6) v9 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v9.AddArg(lo) - v9.AddArg(s) - v.AddArg(v9) + v9.AddArg2(lo, s) + v.AddArg2(v0, v9) return true } return false @@ -1117,7 +1018,6 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1145,8 +1045,7 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool { } v.reset(OpLsh8x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh8x64 x (Int64Make hi lo)) @@ -1163,13 +1062,11 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool { break } v.reset(OpLsh8x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1189,42 +1086,35 @@ func rewriteValuedec64_OpMul64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v3.AddArg(y) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v6 
:= b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v7.AddArg(y) - v5.AddArg(v7) - v4.AddArg(v5) + v5.AddArg2(v6, v7) v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v10.AddArg(x) - v9.AddArg(v10) v11 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v11.AddArg(y) - v9.AddArg(v11) + v9.AddArg2(v10, v11) v8.AddArg(v9) - v4.AddArg(v8) - v0.AddArg(v4) - v.AddArg(v0) + v4.AddArg2(v5, v8) + v0.AddArg2(v1, v4) v12 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) v13 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) v14 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v14.AddArg(x) - v13.AddArg(v14) v15 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v15.AddArg(y) - v13.AddArg(v15) + v13.AddArg2(v14, v15) v12.AddArg(v13) - v.AddArg(v12) + v.AddArg2(v0, v12) return true } } @@ -1239,8 +1129,7 @@ func rewriteValuedec64_OpNeg64(v *Value) bool { v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1258,19 +1147,16 @@ func rewriteValuedec64_OpNeq64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -1288,19 +1174,16 @@ func rewriteValuedec64_OpOr64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, 
typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -1316,7 +1199,6 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1344,8 +1226,7 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { } v.reset(OpRsh16Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh16Ux64 x (Int64Make hi lo)) @@ -1362,13 +1243,11 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1386,7 +1265,6 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1416,8 +1294,7 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool { } v.reset(OpRsh16x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh16x64 x (Int64Make hi lo)) @@ -1434,13 +1311,11 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool { break } v.reset(OpRsh16x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1457,7 +1332,6 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ 
-1485,8 +1359,7 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { } v.reset(OpRsh32Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh32Ux64 x (Int64Make hi lo)) @@ -1503,13 +1376,11 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1527,7 +1398,6 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1555,8 +1425,7 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool { } v.reset(OpRsh32x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh32x64 x (Int64Make hi lo)) @@ -1573,13 +1442,11 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool { break } v.reset(OpRsh32x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1600,35 +1467,26 @@ func rewriteValuedec64_OpRsh64Ux16(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + 
v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v7.AddArg(hi) v8 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) - v8.AddArg(s) v9 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v9.AuxInt = 32 - v8.AddArg(v9) - v7.AddArg(v8) - v1.AddArg(v7) - v.AddArg(v1) + v8.AddArg2(s, v9) + v7.AddArg2(hi, v8) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1649,35 +1507,26 @@ func rewriteValuedec64_OpRsh64Ux32(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v7.AddArg(hi) v8 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) - v8.AddArg(s) v9 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v9.AuxInt = 32 - v8.AddArg(v9) - v7.AddArg(v8) - v1.AddArg(v7) - v.AddArg(v1) + v8.AddArg2(s, v9) + v7.AddArg2(hi, v8) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1694,7 +1543,6 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1722,8 +1570,7 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { } v.reset(OpRsh64Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh64Ux64 x (Int64Make hi lo)) @@ -1740,13 +1587,11 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { break } 
v.reset(OpRsh64Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1767,35 +1612,26 @@ func rewriteValuedec64_OpRsh64Ux8(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v7.AddArg(hi) v8 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) - v8.AddArg(s) v9 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v9.AuxInt = 32 - v8.AddArg(v9) - v7.AddArg(v8) - v1.AddArg(v7) - v.AddArg(v1) + v8.AddArg2(s, v9) + v7.AddArg2(hi, v8) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1816,47 +1652,36 @@ func rewriteValuedec64_OpRsh64x16(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - 
v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v8 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) - v8.AddArg(hi) v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) - v9.AddArg(s) v10 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v10.AuxInt = 32 - v9.AddArg(v10) - v8.AddArg(v9) - v7.AddArg(v8) + v9.AddArg2(s, v10) + v8.AddArg2(hi, v9) v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v12 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v13 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16) - v13.AddArg(s) v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v14.AuxInt = 5 - v13.AddArg(v14) + v13.AddArg2(s, v14) v12.AddArg(v13) v11.AddArg(v12) - v7.AddArg(v11) - v1.AddArg(v7) - v.AddArg(v1) + v7.AddArg2(v8, v11) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1877,45 +1702,34 @@ func rewriteValuedec64_OpRsh64x32(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v8 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) - v8.AddArg(hi) v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) - v9.AddArg(s) v10 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v10.AuxInt = 32 - v9.AddArg(v10) - v8.AddArg(v9) - v7.AddArg(v8) + v9.AddArg2(s, v10) + v8.AddArg2(hi, v9) v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v12 := 
b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v12.AddArg(s) v13 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v13.AuxInt = 5 - v12.AddArg(v13) + v12.AddArg2(s, v13) v11.AddArg(v12) - v7.AddArg(v11) - v1.AddArg(v7) - v.AddArg(v1) + v7.AddArg2(v8, v11) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1933,7 +1747,6 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1947,12 +1760,11 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v3.AddArg(x) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) @@ -1970,8 +1782,7 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { } v.reset(OpRsh64x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh64x64 x (Int64Make hi lo)) @@ -1988,13 +1799,11 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { break } v.reset(OpRsh64x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -2015,47 +1824,36 @@ func rewriteValuedec64_OpRsh64x8(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) v6 := b.NewValue0(v.Pos, 
OpConst8, typ.UInt8) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v8 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) - v8.AddArg(hi) v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) - v9.AddArg(s) v10 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v10.AuxInt = 32 - v9.AddArg(v10) - v8.AddArg(v9) - v7.AddArg(v8) + v9.AddArg2(s, v10) + v8.AddArg2(hi, v9) v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v12 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v13 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8) - v13.AddArg(s) v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v14.AuxInt = 5 - v13.AddArg(v14) + v13.AddArg2(s, v14) v12.AddArg(v13) v11.AddArg(v12) - v7.AddArg(v11) - v1.AddArg(v7) - v.AddArg(v1) + v7.AddArg2(v8, v11) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -2072,7 +1870,6 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -2100,8 +1897,7 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { } v.reset(OpRsh8Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh8Ux64 x (Int64Make hi lo)) @@ -2118,13 +1914,11 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -2142,7 +1936,6 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -2172,8 +1965,7 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { } v.reset(OpRsh8x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + 
v.AddArg2(x, lo) return true } // match: (Rsh8x64 x (Int64Make hi lo)) @@ -2190,13 +1982,11 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { break } v.reset(OpRsh8x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -2227,8 +2017,7 @@ func rewriteValuedec64_OpSignExt32to64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2273,14 +2062,10 @@ func rewriteValuedec64_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, hi.Type.PtrTo()) v0.AuxInt = 4 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(hi) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = lo.Type - v1.AddArg(dst) - v1.AddArg(lo) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, lo, mem) + v.AddArg3(v0, hi, v1) return true } // match: (Store {t} dst (Int64Make hi lo) mem) @@ -2303,14 +2088,10 @@ func rewriteValuedec64_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, lo.Type.PtrTo()) v0.AuxInt = 4 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(lo) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = hi.Type - v1.AddArg(dst) - v1.AddArg(hi) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, hi, mem) + v.AddArg3(v0, lo, v1) return true } return false @@ -2329,31 +2110,26 @@ func rewriteValuedec64_OpSub64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) 
v6.AddArg(y) - v4.AddArg(v6) + v4.AddArg2(v5, v6) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) v8 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(x) - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v10.AddArg(y) - v8.AddArg(v10) + v8.AddArg2(v9, v10) v7.AddArg(v8) - v.AddArg(v7) + v.AddArg2(v0, v7) return true } } @@ -2381,9 +2157,7 @@ func rewriteValuedec64_OpTrunc64to32(v *Value) bool { break } lo := v_0.Args[1] - v.reset(OpCopy) - v.Type = lo.Type - v.AddArg(lo) + v.copyOf(lo) return true } return false @@ -2417,19 +2191,16 @@ func rewriteValuedec64_OpXor64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -2459,8 +2230,7 @@ func rewriteValuedec64_OpZeroExt32to64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go index 9a9e522554..eec3acfcda 100644 --- a/src/cmd/compile/internal/ssa/rewritedecArgs.go +++ b/src/cmd/compile/internal/ssa/rewritedecArgs.go @@ -28,11 +28,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Int) v1.AuxInt = off + config.PtrSize v1.Aux = n - 
v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -48,15 +47,13 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo()) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Int) v1.AuxInt = off + config.PtrSize v1.Aux = n - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpArg, typ.Int) v2.AuxInt = off + 2*config.PtrSize v2.Aux = n - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } // match: (Arg {n} [off]) @@ -72,11 +69,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) v1.AuxInt = off + config.PtrSize v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -92,11 +88,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Float64) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Float64) v1.AuxInt = off + 8 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -112,11 +107,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Float32) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Float32) v1.AuxInt = off + 4 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg ) @@ -161,11 +155,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) v0.AuxInt = off + t.FieldOff(0) v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) v1.AuxInt = off + t.FieldOff(1) v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -182,15 +175,13 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) v0.AuxInt = off + t.FieldOff(0) v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) 
v1.AuxInt = off + t.FieldOff(1) v1.Aux = n - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) v2.AuxInt = off + t.FieldOff(2) v2.Aux = n - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } // match: (Arg {n} [off]) @@ -207,19 +198,16 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) v0.AuxInt = off + t.FieldOff(0) v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) v1.AuxInt = off + t.FieldOff(1) v1.Aux = n - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) v2.AuxInt = off + t.FieldOff(2) v2.Aux = n - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3)) v3.AuxInt = off + t.FieldOff(3) v3.Aux = n - v.AddArg(v3) + v.AddArg4(v0, v1, v2, v3) return true } // match: (Arg ) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index c711af249c..13873b2ac8 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -30,6 +30,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpAnd64(v) case OpAnd8: return rewriteValuegeneric_OpAnd8(v) + case OpAndB: + return rewriteValuegeneric_OpAndB(v) case OpArraySelect: return rewriteValuegeneric_OpArraySelect(v) case OpCom16: @@ -68,6 +70,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpCvt64to32F(v) case OpCvt64to64F: return rewriteValuegeneric_OpCvt64to64F(v) + case OpCvtBoolToUint8: + return rewriteValuegeneric_OpCvtBoolToUint8(v) case OpDiv16: return rewriteValuegeneric_OpDiv16(v) case OpDiv16u: @@ -276,6 +280,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpOr64(v) case OpOr8: return rewriteValuegeneric_OpOr8(v) + case OpOrB: + return rewriteValuegeneric_OpOrB(v) case OpPhi: return rewriteValuegeneric_OpPhi(v) case OpPtrIndex: @@ -489,11 +495,9 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { } z := v_1_1 v.reset(OpMul16) - v.AddArg(x) v0 
:= b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -508,9 +512,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -552,11 +554,9 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { continue } v.reset(OpAdd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -581,11 +581,9 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { continue } v.reset(OpAdd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -611,10 +609,8 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { } v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -643,8 +639,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -671,8 +666,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -699,8 +693,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -755,11 +748,9 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { } z := v_1_1 v.reset(OpMul32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -774,9 +765,7 @@ 
func rewriteValuegeneric_OpAdd32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -818,11 +807,9 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { continue } v.reset(OpAdd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -847,11 +834,9 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { continue } v.reset(OpAdd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -877,10 +862,8 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { } v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -909,8 +892,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -937,8 +919,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -965,8 +946,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1044,11 +1024,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } z := v_1_1 v.reset(OpMul64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -1063,9 +1041,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } 
break @@ -1107,11 +1083,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { continue } v.reset(OpAdd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1136,11 +1110,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { continue } v.reset(OpAdd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -1166,10 +1138,8 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -1198,8 +1168,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1226,8 +1195,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1254,8 +1222,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1333,11 +1300,9 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } z := v_1_1 v.reset(OpMul8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -1352,9 +1317,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1396,11 +1359,9 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { continue } v.reset(OpAdd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) 
- v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1425,11 +1386,9 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { continue } v.reset(OpAdd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -1455,10 +1414,8 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -1487,8 +1444,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1515,8 +1471,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1543,8 +1498,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1669,9 +1623,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And16 (Const16 [-1]) x) @@ -1682,9 +1634,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1719,8 +1669,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { } y := v_1_1 v.reset(OpAnd16) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -1749,11 +1698,9 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { continue } v.reset(OpAnd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + 
v.AddArg2(i, v0) return true } } @@ -1783,8 +1730,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.reset(OpAnd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1875,9 +1821,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And32 (Const32 [-1]) x) @@ -1888,9 +1832,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1925,8 +1867,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { } y := v_1_1 v.reset(OpAnd32) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -1955,11 +1896,9 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { continue } v.reset(OpAnd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1989,8 +1928,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.reset(OpAnd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2081,9 +2019,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And64 (Const64 [-1]) x) @@ -2094,9 +2030,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -2131,8 +2065,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { } y := v_1_1 v.reset(OpAnd64) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -2161,11 +2094,9 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { continue } v.reset(OpAnd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - 
v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -2195,8 +2126,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.reset(OpAnd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2287,9 +2217,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And8 (Const8 [-1]) x) @@ -2300,9 +2228,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -2337,8 +2263,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { } y := v_1_1 v.reset(OpAnd8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -2367,11 +2292,9 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { continue } v.reset(OpAnd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -2401,8 +2324,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.reset(OpAnd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2410,6 +2332,1324 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { } return false } +func rewriteValuegeneric_OpAndB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: d >= c + // result: (Less64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != 
OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: d >= c + // result: (Leq64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: d >= c + // result: (Less32U (Sub32 x (Const32 [c])) (Const32 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + 
return true + } + break + } + // match: (AndB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: d >= c + // result: (Leq32U (Sub32 x (Const32 [c])) (Const32 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: d >= c + // result: (Less16U (Sub16 x (Const16 [c])) (Const16 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: d >= c + // result: (Leq16U (Sub16 x (Const16 [c])) (Const16 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := 
v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: d >= c + // result: (Less8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: d >= c + // result: (Leq8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(d >= c) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, 
x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: d >= c+1 && int64(c+1) > int64(c) + // result: (Less64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int64(c+1) > int64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: d >= c+1 && int64(c+1) > int64(c) + // result: (Leq64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int64(c+1) > int64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + 
return true + } + break + } + // match: (AndB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: d >= c+1 && int32(c+1) > int32(c) + // result: (Less32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int32(c+1) > int32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: d >= c+1 && int32(c+1) > int32(c) + // result: (Leq32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int32(c+1) > int32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: d >= c+1 && int16(c+1) > int16(c) + // result: (Less16U (Sub16 x (Const16 
[c+1])) (Const16 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int16(c+1) > int16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: d >= c+1 && int16(c+1) > int16(c) + // result: (Leq16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int16(c+1) > int16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: d >= c+1 && int8(c+1) > int8(c) + // result: (Less8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op 
!= OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int8(c+1) > int8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: d >= c+1 && int8(c+1) > int8(c) + // result: (Leq8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(d >= c+1 && int8(c+1) > int8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c) + // result: (Less64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := 
v_1_1.AuxInt + if !(uint64(d) >= uint64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c) + // result: (Leq64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(d) >= uint64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c) + // result: (Less32U (Sub32 x (Const32 [c])) (Const32 [int64(int32(d-c))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(d) >= uint32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := 
b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int64(int32(d - c)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c) + // result: (Leq32U (Sub32 x (Const32 [c])) (Const32 [int64(int32(d-c))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(d) >= uint32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int64(int32(d - c)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c) + // result: (Less16U (Sub16 x (Const16 [c])) (Const16 [int64(int16(d-c))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(d) >= uint16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int64(int16(d - c)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 
[d]))) + // cond: uint16(d) >= uint16(c) + // result: (Leq16U (Sub16 x (Const16 [c])) (Const16 [int64(int16(d-c))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(d) >= uint16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int64(int16(d - c)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c) + // result: (Less8U (Sub8 x (Const8 [c])) (Const8 [int64(int8(d-c))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(d) >= uint8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int64(int8(d - c)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c) + // result: (Leq8U (Sub8 x (Const8 [c])) (Const8 [int64(int8(d-c))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + 
} + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(d) >= uint8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = c + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int64(int8(d - c)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) + // result: (Less64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) + // result: (Leq64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64U { 
+ continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = c + 1 + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d - c - 1 + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) + // result: (Less32U (Sub32 x (Const32 [int64(int32(c+1))])) (Const32 [int64(int32(d-c-1))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int64(int32(c + 1)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int64(int32(d - c - 1)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) + // result: (Leq32U (Sub32 x (Const32 [int64(int32(c+1))])) (Const32 [int64(int32(d-c-1))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32U { + continue + } + 
_ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int64(int32(c + 1)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int64(int32(d - c - 1)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) + // result: (Less16U (Sub16 x (Const16 [int64(int16(c+1))])) (Const16 [int64(int16(d-c-1))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int64(int16(c + 1)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int64(int16(d - c - 1)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) + // result: (Leq16U (Sub16 x (Const16 [int64(int16(c+1))])) (Const16 [int64(int16(d-c-1))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16U { + 
continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int64(int16(c + 1)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int64(int16(d - c - 1)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) + // result: (Less8U (Sub8 x (Const8 [int64(int8(c+1))])) (Const8 [int64(int8(d-c-1))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int64(int8(c + 1)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int64(int8(d - c - 1)) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) + // result: (Leq8U (Sub8 x (Const8 [int64(int8(c+1))])) (Const8 [int64(int8(d-c-1))])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + 
if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int64(int8(c + 1)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int64(int8(d - c - 1)) + v.AddArg2(v0, v2) + return true + } + break + } + return false +} func rewriteValuegeneric_OpArraySelect(v *Value) bool { v_0 := v.Args[0] // match: (ArraySelect (ArrayMake1 x)) @@ -2419,9 +3659,7 @@ func rewriteValuegeneric_OpArraySelect(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ArraySelect [0] (IData x)) @@ -2446,9 +3684,7 @@ func rewriteValuegeneric_OpCom16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com16 (Const16 [c])) @@ -2493,9 +3729,7 @@ func rewriteValuegeneric_OpCom32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com32 (Const32 [c])) @@ -2540,9 +3774,7 @@ func rewriteValuegeneric_OpCom64(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com64 (Const64 [c])) @@ -2587,9 +3819,7 @@ func rewriteValuegeneric_OpCom8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com8 (Const8 [c])) @@ -2633,9 +3863,8 @@ func rewriteValuegeneric_OpConstInterface(v *Value) bool { for { v.reset(OpIMake) v0 := b.NewValue0(v.Pos, OpConstNil, typ.Uintptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2652,13 +3881,11 @@ func rewriteValuegeneric_OpConstSlice(v 
*Value) bool { } v.reset(OpSliceMake) v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo()) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst32, typ.Int) v2.AuxInt = 0 - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } // match: (ConstSlice) @@ -2670,13 +3897,11 @@ func rewriteValuegeneric_OpConstSlice(v *Value) bool { } v.reset(OpSliceMake) v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo()) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.Int) v2.AuxInt = 0 - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } return false @@ -2696,10 +3921,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { } v.reset(OpStringMake) v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (ConstString {s}) @@ -2712,10 +3936,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { } v.reset(OpStringMake) v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (ConstString {s}) @@ -2731,10 +3954,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { v0.Aux = fe.StringData(s.(string)) v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpConst32, typ.Int) v2.AuxInt = int64(len(s.(string))) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (ConstString {s}) @@ -2750,10 +3972,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { v0.Aux = fe.StringData(s.(string)) v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpConst64, typ.Int) v2.AuxInt = int64(len(s.(string))) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } return false @@ -2781,8 +4002,7 
@@ func rewriteValuegeneric_OpConvert(v *Value) bool { continue } v.reset(OpAdd64) - v.AddArg(ptr) - v.AddArg(off) + v.AddArg2(ptr, off) return true } break @@ -2807,8 +4027,7 @@ func rewriteValuegeneric_OpConvert(v *Value) bool { continue } v.reset(OpAdd32) - v.AddArg(ptr) - v.AddArg(off) + v.AddArg2(ptr, off) return true } break @@ -2824,9 +4043,7 @@ func rewriteValuegeneric_OpConvert(v *Value) bool { if mem != v_1 { break } - v.reset(OpCopy) - v.Type = ptr.Type - v.AddArg(ptr) + v.copyOf(ptr) return true } return false @@ -2981,6 +4198,21 @@ func rewriteValuegeneric_OpCvt64to64F(v *Value) bool { } return false } +func rewriteValuegeneric_OpCvtBoolToUint8(v *Value) bool { + v_0 := v.Args[0] + // match: (CvtBoolToUint8 (ConstBool [c])) + // result: (Const8 [c]) + for { + if v_0.Op != OpConstBool { + break + } + c := v_0.AuxInt + v.reset(OpConst8) + v.AuxInt = c + return true + } + return false +} func rewriteValuegeneric_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -3018,10 +4250,9 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div16 n (Const16 [c])) @@ -3039,10 +4270,9 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { } v.reset(OpNeg16) v0 := b.NewValue0(v.Pos, OpDiv16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst16, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -3056,14 +4286,12 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { } v.reset(OpRsh16Ux64) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg16, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 15 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div16 n (Const16 [c])) @@ -3081,22 +4309,18 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool 
{ } v.reset(OpRsh16x64) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh16x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 15 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div16 x (Const16 [c])) @@ -3118,23 +4342,19 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(smagic(16, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 + smagic(16, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh32x64, t) v6 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v7.AuxInt = 31 - v5.AddArg(v7) - v.AddArg(v5) + v5.AddArg2(v6, v7) + v.AddArg2(v0, v5) return true } return false @@ -3177,10 +4397,9 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div16u x (Const16 [c])) @@ -3200,14 +4419,12 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(1<<16 + umagic(16, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := 
b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 + umagic(16, c).s - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -3228,14 +4445,12 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(1<<15 + umagic(16, c).m/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 + umagic(16, c).s - 1 - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -3256,19 +4471,16 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(1<<15 + (umagic(16, c).m+1)/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 1 - v3.AddArg(v5) - v1.AddArg(v3) - v0.AddArg(v1) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 16 + umagic(16, c).s - 2 - v0.AddArg(v6) + v0.AddArg2(v1, v6) v.AddArg(v0) return true } @@ -3290,23 +4502,19 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32) v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 - v2.AddArg(v4) - v1.AddArg(v2) + v2.AddArg2(v3, v4) v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v6.AuxInt = int64(umagic(16, c).m) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v7.AddArg(x) - v5.AddArg(v7) - v1.AddArg(v5) - v0.AddArg(v1) + v5.AddArg2(v6, v7) + v1.AddArg2(v2, v5) v8 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v8.AuxInt = 16 + 
umagic(16, c).s - 1 - v0.AddArg(v8) + v0.AddArg2(v1, v8) v.AddArg(v0) return true } @@ -3350,10 +4558,9 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffffffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div32 n (Const32 [c])) @@ -3371,10 +4578,9 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { } v.reset(OpNeg32) v0 := b.NewValue0(v.Pos, OpDiv32, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst32, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -3388,14 +4594,12 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { } v.reset(OpRsh32Ux64) v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg32, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 31 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div32 n (Const32 [c])) @@ -3413,22 +4617,18 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { } v.reset(OpRsh32x64) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh32x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 31 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div32 x (Const32 [c])) @@ -3450,23 +4650,19 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(smagic(32, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v3.AddArg(x) - v1.AddArg(v3) - 
v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 + smagic(32, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh64x64, t) v6 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v7.AuxInt = 63 - v5.AddArg(v7) - v.AddArg(v5) + v5.AddArg2(v6, v7) + v.AddArg2(v0, v5) return true } // match: (Div32 x (Const32 [c])) @@ -3488,19 +4684,15 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpHmul32, t) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(int32(smagic(32, c).m / 2)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = smagic(32, c).s - 1 - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpRsh32x64, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 31 - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } // match: (Div32 x (Const32 [c])) @@ -3523,21 +4715,16 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpHmul32, t) v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v3.AuxInt = int64(int32(smagic(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v2.AddArg2(v3, x) + v1.AddArg2(v2, x) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = smagic(32, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh32x64, t) - v5.AddArg(x) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 31 - v5.AddArg(v6) - v.AddArg(v5) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) return true } return false @@ -3547,6 +4734,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Div32F (Const32F [c]) (Const32F [d])) + // cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) // 
result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) for { if v_0.Op != OpConst32F { @@ -3557,6 +4745,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { break } d := v_1.AuxInt + if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) { + break + } v.reset(OpConst32F) v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d)) return true @@ -3575,10 +4766,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { break } v.reset(OpMul32F) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst32F, t) v0.AuxInt = auxFrom32F(1 / auxTo32F(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -3621,10 +4811,9 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffffffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div32u x (Const32 [c])) @@ -3644,12 +4833,10 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(1<<31 + umagic(32, c).m/2)) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(v1, x) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = umagic(32, c).s - 1 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div32u x (Const32 [c])) @@ -3669,17 +4856,14 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(1<<31 + (umagic(32, c).m+1)/2)) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 1 - v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v2.AddArg2(x, v3) + v0.AddArg2(v1, v2) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = umagic(32, c).s - 2 - v.AddArg(v4) + v.AddArg2(v0, v4) return true } // match: (Div32u x (Const32 [c])) @@ -3697,17 +4881,14 @@ func 
rewriteValuegeneric_OpDiv32u(v *Value) bool { v.reset(OpRsh32Ux64) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(int32(umagic(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, x) + v0.AddArg2(x, v1) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = umagic(32, c).s - 1 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } // match: (Div32u x (Const32 [c])) @@ -3727,14 +4908,12 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(1<<31 + umagic(32, c).m/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 + umagic(32, c).s - 1 - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -3755,19 +4934,16 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(1<<31 + (umagic(32, c).m+1)/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 1 - v3.AddArg(v5) - v1.AddArg(v3) - v0.AddArg(v1) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 32 + umagic(32, c).s - 2 - v0.AddArg(v6) + v0.AddArg2(v1, v6) v.AddArg(v0) return true } @@ -3789,23 +4965,19 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 - 
v2.AddArg(v4) - v1.AddArg(v2) + v2.AddArg2(v3, v4) v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32) v6.AuxInt = int64(umagic(32, c).m) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v7.AddArg(x) - v5.AddArg(v7) - v1.AddArg(v5) - v0.AddArg(v1) + v5.AddArg2(v6, v7) + v1.AddArg2(v2, v5) v8 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v8.AuxInt = 32 + umagic(32, c).s - 1 - v0.AddArg(v8) + v0.AddArg2(v1, v8) v.AddArg(v0) return true } @@ -3849,10 +5021,9 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div64 n (Const64 [-1<<63])) @@ -3882,10 +5053,9 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { } v.reset(OpNeg64) v0 := b.NewValue0(v.Pos, OpDiv64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -3899,14 +5069,12 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { } v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg64, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 63 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div64 n (Const64 [c])) @@ -3924,22 +5092,18 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { } v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh64x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 63 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 64 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) 
v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div64 x (Const64 [c])) @@ -3961,19 +5125,15 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpHmul64, t) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(smagic(64, c).m / 2) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = smagic(64, c).s - 1 - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpRsh64x64, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 63 - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } // match: (Div64 x (Const64 [c])) @@ -3996,21 +5156,16 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpHmul64, t) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = int64(smagic(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v2.AddArg2(v3, x) + v1.AddArg2(v2, x) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = smagic(64, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh64x64, t) - v5.AddArg(x) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 63 - v5.AddArg(v6) - v.AddArg(v5) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) return true } return false @@ -4020,6 +5175,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Div64F (Const64F [c]) (Const64F [d])) + // cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d)) // result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) for { if v_0.Op != OpConst64F { @@ -4030,6 +5186,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { break } d := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) { + break + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d)) return true @@ -4048,10 +5207,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) 
bool { break } v.reset(OpMul64F) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64F, t) v0.AuxInt = auxFrom64F(1 / auxTo64F(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -4094,10 +5252,9 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div64u n (Const64 [-1<<63])) @@ -4108,10 +5265,9 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = 63 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div64u x (Const64 [c])) @@ -4131,12 +5287,10 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = int64(1<<63 + umagic(64, c).m/2) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(v1, x) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = umagic(64, c).s - 1 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div64u x (Const64 [c])) @@ -4156,17 +5310,14 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = int64(1<<63 + (umagic(64, c).m+1)/2) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 1 - v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v2.AddArg2(x, v3) + v0.AddArg2(v1, v2) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = umagic(64, c).s - 2 - v.AddArg(v4) + v.AddArg2(v0, v4) return true } // match: (Div64u x (Const64 [c])) @@ -4184,17 +5335,14 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v.reset(OpRsh64Ux64) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpHmul64u, 
typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(umagic(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, x) + v0.AddArg2(x, v1) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = umagic(64, c).s - 1 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } return false @@ -4236,10 +5384,9 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div8 n (Const8 [c])) @@ -4257,10 +5404,9 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { } v.reset(OpNeg8) v0 := b.NewValue0(v.Pos, OpDiv8, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst8, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -4274,14 +5420,12 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { } v.reset(OpRsh8Ux64) v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg8, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 7 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div8 n (Const8 [c])) @@ -4299,22 +5443,18 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { } v.reset(OpRsh8x64) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh8x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 7 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 8 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div8 x (Const8 [c])) @@ -4336,23 +5476,19 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v1 := 
b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(smagic(8, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 8 + smagic(8, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh32x64, t) v6 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v7.AuxInt = 31 - v5.AddArg(v7) - v.AddArg(v5) + v5.AddArg2(v6, v7) + v.AddArg2(v0, v5) return true } return false @@ -4394,10 +5530,9 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div8u x (Const8 [c])) @@ -4417,14 +5552,12 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(1<<8 + umagic(8, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 8 + umagic(8, c).s - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -4471,8 +5604,7 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v.reset(OpEq16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -4518,14 +5650,12 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = c & 0xffff - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := 
b.NewValue0(v.Pos, OpConst32, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } break @@ -4552,14 +5682,12 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) v2.AuxInt = c - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } break @@ -4617,16 +5745,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4686,16 +5811,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4764,16 +5886,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, 
typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4851,16 +5970,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4932,20 +6048,16 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) + v2.AddArg2(v3, x) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) + v1.AddArg2(v2, v4) v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) + v.AddArg2(v0, v6) return true } } @@ -5008,14 +6120,12 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { } v.reset(OpEq16) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst16, t) v1.AuxInt = int64(1< p1 (Store {t2} p2 _ (Store {t3} p3 x _))) @@ -9183,15 +10168,12 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t3 := v_1_2.Aux - _ = v_1_2.Args[2] - p3 := v_1_2.Args[0] x := v_1_2.Args[1] + p3 := v_1_2.Args[0] if !(isSamePtr(p1, p3) && 
t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p3, sizeof(t3), p2, sizeof(t2))) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _)))) @@ -9218,15 +10200,12 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t4 := v_1_2_2.Aux - _ = v_1_2_2.Args[2] - p4 := v_1_2_2.Args[0] x := v_1_2_2.Args[1] + p4 := v_1_2_2.Args[0] if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p4, sizeof(t4), p2, sizeof(t2)) && disjoint(p4, sizeof(t4), p3, sizeof(t3))) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _))))) @@ -9260,19 +10239,16 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t5 := v_1_2_2_2.Aux - _ = v_1_2_2_2.Args[2] - p5 := v_1_2_2_2.Args[0] x := v_1_2_2_2.Args[1] + p5 := v_1_2_2_2.Args[0] if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p5, sizeof(t5), p2, sizeof(t2)) && disjoint(p5, sizeof(t5), p3, sizeof(t3)) && disjoint(p5, sizeof(t5), p4, sizeof(t4))) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 (Const64 [x]) _)) - // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) // result: (Const64F [x]) for { t1 := v.Type @@ -9281,14 +10257,14 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst64 { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && 
!math.IsNaN(math.Float64frombits(uint64(x)))) { break } v.reset(OpConst64F) @@ -9296,7 +10272,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const32 [x]) _)) - // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) // result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) for { t1 := v.Type @@ -9305,14 +10281,14 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst32 { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) { break } v.reset(OpConst32F) @@ -9329,7 +10305,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst64F { @@ -9353,7 +10329,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst32F { @@ -9389,20 +10365,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p3 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2))) { break } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p3) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _)))) @@ -9434,20 +10407,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { 
break } n := mem.AuxInt - _ = mem.Args[1] p4 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3))) { break } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p4) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _))))) @@ -9486,20 +10456,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p5 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4))) { break } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p5) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _)))))) @@ -9545,20 +10512,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p6 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4)) && disjoint(op, t1.Size(), p5, sizeof(t5))) { break } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p6) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) @@ -9575,7 +10539,6 @@ 
func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) { break @@ -9598,7 +10561,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) { break @@ -9621,7 +10583,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) { break @@ -9644,7 +10605,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) { break @@ -9667,7 +10627,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) { break @@ -9690,7 +10649,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) { break @@ -9713,7 +10671,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) { break @@ -9748,8 +10705,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) v.AddArg(v0) return true } @@ -9768,16 +10724,13 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v3.AuxInt = t.FieldOff(1) v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - 
v.AddArg(v2) + v2.AddArg2(v3, mem) + v.AddArg2(v0, v2) return true } // match: (Load ptr mem) @@ -9795,23 +10748,18 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v3.AuxInt = t.FieldOff(1) v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(v3, mem) v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v5.AuxInt = t.FieldOff(2) v5.AddArg(ptr) - v4.AddArg(v5) - v4.AddArg(mem) - v.AddArg(v4) + v4.AddArg2(v5, mem) + v.AddArg3(v0, v2, v4) return true } // match: (Load ptr mem) @@ -9829,30 +10777,23 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v3.AuxInt = t.FieldOff(1) v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(v3, mem) v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v5.AuxInt = t.FieldOff(2) v5.AddArg(ptr) - v4.AddArg(v5) - v4.AddArg(mem) - v.AddArg(v4) + v4.AddArg2(v5, mem) v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3)) v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) v7.AuxInt = t.FieldOff(3) v7.AddArg(ptr) - v6.AddArg(v7) - v6.AddArg(mem) - v.AddArg(v6) + v6.AddArg2(v7, mem) + v.AddArg4(v0, v2, v4, v6) return true } // match: (Load _ _) @@ -9878,8 +10819,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { } v.reset(OpArrayMake1) v0 := b.NewValue0(v.Pos, OpLoad, t.Elem()) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true 
} @@ -9899,10 +10839,9 @@ func rewriteValuegeneric_OpLsh16x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x16 (Const16 [0]) _) @@ -9931,10 +10870,9 @@ func rewriteValuegeneric_OpLsh16x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x32 (Const16 [0]) _) @@ -9976,9 +10914,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh16x64 (Const16 [0]) _) @@ -10029,10 +10965,9 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { break } v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10067,10 +11002,9 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { break } v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10089,10 +11023,9 @@ func rewriteValuegeneric_OpLsh16x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x8 (Const16 [0]) _) @@ -10121,10 +11054,9 @@ func rewriteValuegeneric_OpLsh32x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x16 (Const32 [0]) _) @@ -10153,10 +11085,9 @@ func 
rewriteValuegeneric_OpLsh32x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x32 (Const32 [0]) _) @@ -10198,9 +11129,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh32x64 (Const32 [0]) _) @@ -10251,10 +11180,9 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { break } v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10289,10 +11217,9 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { break } v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10311,10 +11238,9 @@ func rewriteValuegeneric_OpLsh32x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x8 (Const32 [0]) _) @@ -10343,10 +11269,9 @@ func rewriteValuegeneric_OpLsh64x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x16 (Const64 [0]) _) @@ -10375,10 +11300,9 @@ func rewriteValuegeneric_OpLsh64x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x32 (Const64 [0]) _) @@ -10420,9 +11344,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { if v_1.Op 
!= OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh64x64 (Const64 [0]) _) @@ -10473,10 +11395,9 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { break } v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10511,10 +11432,9 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { break } v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10533,10 +11453,9 @@ func rewriteValuegeneric_OpLsh64x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x8 (Const64 [0]) _) @@ -10565,10 +11484,9 @@ func rewriteValuegeneric_OpLsh8x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x16 (Const8 [0]) _) @@ -10597,10 +11515,9 @@ func rewriteValuegeneric_OpLsh8x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x32 (Const8 [0]) _) @@ -10642,9 +11559,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh8x64 (Const8 [0]) _) @@ -10695,10 +11610,9 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { break } v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + 
v.AddArg2(x, v0) return true } // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10733,10 +11647,9 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { break } v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10755,10 +11668,9 @@ func rewriteValuegeneric_OpLsh8x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x8 (Const8 [0]) _) @@ -10810,10 +11722,9 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { break } v.reset(OpAnd16) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = (c & 0xffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod16 n (Const16 [c])) @@ -10831,10 +11742,9 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { } v.reset(OpMod16) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod16 x (Const16 [c])) @@ -10851,18 +11761,15 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { break } v.reset(OpSub16) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul16, t) v1 := b.NewValue0(v.Pos, OpDiv16, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst16, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst16, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -10904,10 +11811,9 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { break } v.reset(OpAnd16) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = (c & 0xffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod16u x (Const16 [c])) @@ -10924,18 +11830,15 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { break } v.reset(OpSub16) - 
v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul16, t) v1 := b.NewValue0(v.Pos, OpDiv16u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst16, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst16, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -10977,10 +11880,9 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { break } v.reset(OpAnd32) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = (c & 0xffffffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod32 n (Const32 [c])) @@ -10998,10 +11900,9 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { } v.reset(OpMod32) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod32 x (Const32 [c])) @@ -11018,18 +11919,15 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { break } v.reset(OpSub32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul32, t) v1 := b.NewValue0(v.Pos, OpDiv32, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst32, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11071,10 +11969,9 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { break } v.reset(OpAnd32) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = (c & 0xffffffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod32u x (Const32 [c])) @@ -11091,18 +11988,15 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { break } v.reset(OpSub32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul32, t) v1 := b.NewValue0(v.Pos, OpDiv32u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst32, t) v3.AuxInt = c - v0.AddArg(v3) - 
v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11144,10 +12038,9 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { break } v.reset(OpAnd64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64 n (Const64 [-1<<63])) @@ -11158,9 +12051,7 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 || !(isNonNegative(n)) { break } - v.reset(OpCopy) - v.Type = n.Type - v.AddArg(n) + v.copyOf(n) return true } // match: (Mod64 n (Const64 [c])) @@ -11178,10 +12069,9 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { } v.reset(OpMod64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64 x (Const64 [c])) @@ -11198,18 +12088,15 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { break } v.reset(OpSub64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul64, t) v1 := b.NewValue0(v.Pos, OpDiv64, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11251,10 +12138,9 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } v.reset(OpAnd64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64u n (Const64 [-1<<63])) @@ -11266,10 +12152,9 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } v.reset(OpAnd64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 1<<63 - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64u x (Const64 [c])) @@ -11286,18 +12171,15 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } v.reset(OpSub64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul64, t) v1 := 
b.NewValue0(v.Pos, OpDiv64u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11339,10 +12221,9 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { break } v.reset(OpAnd8) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = (c & 0xff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod8 n (Const8 [c])) @@ -11360,10 +12241,9 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { } v.reset(OpMod8) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod8 x (Const8 [c])) @@ -11380,18 +12260,15 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { break } v.reset(OpSub8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul8, t) v1 := b.NewValue0(v.Pos, OpDiv8, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst8, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst8, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11433,10 +12310,9 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { break } v.reset(OpAnd8) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = (c & 0xff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod8u x (Const8 [c])) @@ -11453,18 +12329,15 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { break } v.reset(OpSub8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul8, t) v1 := b.NewValue0(v.Pos, OpDiv8u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst8, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst8, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11487,7 +12360,6 
@@ func rewriteValuegeneric_OpMove(v *Value) bool { if mem.Op != OpZero || mem.AuxInt != n || mem.Aux != t { break } - _ = mem.Args[1] dst2 := mem.Args[0] if !(isSamePtr(src, dst2)) { break @@ -11495,8 +12367,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(mem) + v.AddArg2(dst1, mem) return true } // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) @@ -11515,7 +12386,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { if mem_0.Op != OpZero || mem_0.AuxInt != n || mem_0.Aux != t { break } - _ = mem_0.Args[1] dst0 := mem_0.Args[0] if !(isSamePtr(src, dst0)) { break @@ -11523,8 +12393,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(mem) + v.AddArg2(dst1, mem) return true } // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) @@ -11553,9 +12422,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t1 - v.AddArg(dst1) - v.AddArg(src1) - v.AddArg(mem) + v.AddArg3(dst1, src1, mem) return true } // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) @@ -11578,9 +12445,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) - v.AddArg(mem) + v.AddArg3(dst1, src1, mem) return true } // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) @@ -11608,12 +12473,10 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg3(dst1, src1, v0) return true } // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) @@ -11636,9 +12499,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) - v.AddArg(mem) + 
v.AddArg3(dst1, src1, mem) return true } // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) @@ -11666,12 +12527,10 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg3(dst1, src1, v0) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))) @@ -11701,7 +12560,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_2.Aux - _ = mem_2.Args[2] + d2 := mem_2.Args[1] op3 := mem_2.Args[0] if op3.Op != OpOffPtr { break @@ -11711,7 +12570,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p3 := op3.Args[0] - d2 := mem_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)) { break } @@ -11720,17 +12578,13 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = 0 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(v2, d2, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))) @@ -11774,7 +12628,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_2_2.Aux - _ = mem_2_2.Args[2] + d3 := mem_2_2.Args[1] op4 := mem_2_2.Args[0] if op4.Op != OpOffPtr { break @@ -11784,7 +12638,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p4 := op4.Args[0] - d3 := mem_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && 
isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)) { break } @@ -11793,25 +12646,19 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = 0 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(v4, d3, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))) @@ -11869,7 +12716,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_2_2_2.Aux - _ = mem_2_2_2.Args[2] + d4 := mem_2_2_2.Args[1] op5 := mem_2_2_2.Args[0] if op5.Op != OpOffPtr { break @@ -11879,7 +12726,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p5 := op5.Args[0] - d4 := mem_2_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)+sizeof(t5)) { break } @@ -11888,33 +12734,25 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 
v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = 0 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(v6, d4, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) @@ -11948,7 +12786,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_0_2.Aux - _ = mem_0_2.Args[2] + d2 := mem_0_2.Args[1] op3 := mem_0_2.Args[0] if op3.Op != OpOffPtr { break @@ -11958,7 +12796,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p3 := op3.Args[0] - d2 := mem_0_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)) { break } @@ -11967,17 +12804,13 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = 0 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(v2, d2, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))) @@ -12025,7 +12858,7 @@ func 
rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_0_2_2.Aux - _ = mem_0_2_2.Args[2] + d3 := mem_0_2_2.Args[1] op4 := mem_0_2_2.Args[0] if op4.Op != OpOffPtr { break @@ -12035,7 +12868,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p4 := op4.Args[0] - d3 := mem_0_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)) { break } @@ -12044,25 +12876,19 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = 0 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(v4, d3, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) @@ -12124,7 +12950,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_0_2_2_2.Aux - _ = mem_0_2_2_2.Args[2] + d4 := mem_0_2_2_2.Args[1] op5 := mem_0_2_2_2.Args[0] if op5.Op != OpOffPtr { break @@ -12134,7 +12960,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p5 := op5.Args[0] - d4 := mem_0_2_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= 
alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)+sizeof(t5)) { break } @@ -12143,33 +12968,25 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = 0 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(v6, d4, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _))) @@ -12199,7 +13016,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_2.Aux - _ = mem_2.Args[1] p3 := mem_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2+sizeof(t2)) { break @@ -12209,14 +13025,11 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = n v1.Aux = t1 - v1.AddArg(dst) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(dst, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _)))) @@ -12260,7 +13073,6 @@ func 
rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_2_2.Aux - _ = mem_2_2.Args[1] p4 := mem_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3)) { break @@ -12270,22 +13082,17 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v3.AuxInt = n v3.Aux = t1 - v3.AddArg(dst) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg2(dst, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _))))) @@ -12343,7 +13150,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_2_2_2.Aux - _ = mem_2_2_2.Args[1] p5 := mem_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4)) { break @@ -12353,30 +13159,23 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 
:= b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v5.AuxInt = n v5.Aux = t1 - v5.AddArg(dst) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg2(dst, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _)))))) @@ -12448,7 +13247,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t6 := mem_2_2_2_2.Aux - _ = mem_2_2_2_2.Args[1] p6 := mem_2_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4) && n >= o5+sizeof(t5)) { break @@ -12458,38 +13256,29 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = o5 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v7.AuxInt = n v7.Aux = t1 - v7.AddArg(dst) - v7.AddArg(mem) - v5.AddArg(v7) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) 
+ v7.AddArg2(dst, mem) + v5.AddArg3(v6, d4, v7) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _)))) @@ -12523,7 +13312,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_0_2.Aux - _ = mem_0_2.Args[1] p3 := mem_0_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2+sizeof(t2)) { break @@ -12533,14 +13321,11 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = n v1.Aux = t1 - v1.AddArg(dst) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(dst, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _))))) @@ -12588,7 +13373,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_0_2_2.Aux - _ = mem_0_2_2.Args[1] p4 := mem_0_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3)) { break @@ -12598,22 +13382,17 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v3.AuxInt = n v3.Aux = t1 - v3.AddArg(dst) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg2(dst, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return 
true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _)))))) @@ -12675,7 +13454,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_0_2_2_2.Aux - _ = mem_0_2_2_2.Args[1] p5 := mem_0_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4)) { break @@ -12685,30 +13463,23 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v5.AuxInt = n v5.Aux = t1 - v5.AddArg(dst) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg2(dst, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _))))))) @@ -12784,7 +13555,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t6 := mem_0_2_2_2_2.Aux - _ = mem_0_2_2_2_2.Args[1] p6 := mem_0_2_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= 
alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4) && n >= o5+sizeof(t5)) { break @@ -12794,38 +13564,29 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = o5 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v7.AuxInt = n v7.Aux = t1 - v7.AddArg(dst) - v7.AddArg(mem) - v5.AddArg(v7) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v7.AddArg2(dst, mem) + v5.AddArg3(v6, d4, v7) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _)) @@ -12841,18 +13602,15 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t2 := midmem.Aux - _ = midmem.Args[2] - tmp2 := midmem.Args[0] src := midmem.Args[1] + tmp2 := midmem.Args[0] if !(t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { break } v.reset(OpMove) v.AuxInt = s v.Aux = t1 - v.AddArg(dst) - v.AddArg(src) - v.AddArg(midmem) + v.AddArg3(dst, src, midmem) return true } // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _))) @@ -12872,18 +13630,15 @@ func rewriteValuegeneric_OpMove(v 
*Value) bool { break } t2 := midmem_0.Aux - _ = midmem_0.Args[2] - tmp2 := midmem_0.Args[0] src := midmem_0.Args[1] + tmp2 := midmem_0.Args[0] if !(t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { break } v.reset(OpMove) v.AuxInt = s v.Aux = t1 - v.AddArg(dst) - v.AddArg(src) - v.AddArg(midmem) + v.AddArg3(dst, src, midmem) return true } // match: (Move dst src mem) @@ -12896,9 +13651,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { if !(isSamePtr(dst, src)) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } return false @@ -12934,9 +13687,7 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -12971,10 +13722,9 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } v.reset(OpLsh16x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -12995,10 +13745,9 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } v.reset(OpNeg16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13041,8 +13790,7 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.reset(OpMul16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13081,9 +13829,7 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13118,10 +13864,9 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } v.reset(OpLsh32x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, 
typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13142,10 +13887,9 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } v.reset(OpNeg32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13175,13 +13919,11 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMul32, t) v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(v2, x) + v.AddArg2(v0, v1) return true } } @@ -13224,8 +13966,7 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.reset(OpMul32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13237,6 +13978,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Mul32F (Const32F [c]) (Const32F [d])) + // cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13248,6 +13990,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { continue } d := v_1.AuxInt + if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) { + continue + } v.reset(OpConst32F) v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) return true @@ -13262,9 +14007,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom64F(1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13292,8 +14035,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { continue } v.reset(OpAdd32F) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -13331,9 +14073,7 @@ func 
rewriteValuegeneric_OpMul64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13368,10 +14108,9 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } v.reset(OpLsh64x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13392,10 +14131,9 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } v.reset(OpNeg64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13425,13 +14163,11 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c * d - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMul64, t) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(v2, x) + v.AddArg2(v0, v1) return true } } @@ -13474,8 +14210,7 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.reset(OpMul64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c * d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13487,6 +14222,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Mul64F (Const64F [c]) (Const64F [d])) + // cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d)) // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13498,6 +14234,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { continue } d := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) { + continue + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) return true @@ -13512,9 +14251,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(1) { continue } - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13542,8 +14279,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { continue } v.reset(OpAdd64F) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -13581,9 +14317,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13618,10 +14352,9 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } v.reset(OpLsh8x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13642,10 +14375,9 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } v.reset(OpNeg8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13688,8 +14420,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.reset(OpMul8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13720,8 +14451,7 @@ func rewriteValuegeneric_OpNeg16(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub16) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg16 (Neg16 x)) @@ -13731,9 +14461,7 @@ func rewriteValuegeneric_OpNeg16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg16 (Com16 x)) @@ -13747,8 +14475,7 @@ func rewriteValuegeneric_OpNeg16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13776,8 +14503,7 @@ func rewriteValuegeneric_OpNeg32(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub32) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg32 (Neg32 x)) @@ 
-13787,9 +14513,7 @@ func rewriteValuegeneric_OpNeg32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg32 (Com32 x)) @@ -13803,8 +14527,7 @@ func rewriteValuegeneric_OpNeg32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13851,8 +14574,7 @@ func rewriteValuegeneric_OpNeg64(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub64) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg64 (Neg64 x)) @@ -13862,9 +14584,7 @@ func rewriteValuegeneric_OpNeg64(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg64 (Com64 x)) @@ -13878,8 +14598,7 @@ func rewriteValuegeneric_OpNeg64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13926,8 +14645,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub8) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg8 (Neg8 x)) @@ -13937,9 +14655,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg8 (Com8 x)) @@ -13953,8 +14669,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13999,8 +14714,7 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { v.reset(OpNeq16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -14080,14 +14794,12 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { } 
v.reset(OpNeq16) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst16, t) v1.AuxInt = int64(1<= d + // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: c >= d + // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: c >= d + // result: (Less32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := 
v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: c >= d + // result: (Leq32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: c >= d + // result: (Less16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } 
+ v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: c >= d + // result: (Leq16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: c >= d + // result: (Less8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // 
cond: c >= d + // result: (Leq8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(c >= d) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: c >= d+1 && int64(d+1) > int64(d) + // result: (Less64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int64(d+1) > int64(d)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: c >= d+1 && int64(d+1) > int64(d) + // result: (Leq64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if 
v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int64(d+1) > int64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: c >= d+1 && int32(d+1) > int32(d) + // result: (Less32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int32(d+1) > int32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: c >= d+1 && int32(d+1) > int32(d) + // result: (Leq32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if 
v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int32(d+1) > int32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: c >= d+1 && int16(d+1) > int16(d) + // result: (Less16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int16(d+1) > int16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: c >= d+1 && int16(d+1) > int16(d) + // result: (Leq16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int16(d+1) > int16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + 
v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: c >= d+1 && int8(d+1) > int8(d) + // result: (Less8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int8(d+1) > int8(d)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: c >= d+1 && int8(d+1) > int8(d) + // result: (Leq8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(c >= d+1 && int8(d+1) > int8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64U 
(Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d) + // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(c) >= uint64(d)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d) + // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(c) >= uint64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d) + // result: (Less32U (Const32 [int64(int32(c-d))]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != 
OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(c) >= uint32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int64(int32(c - d)) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d) + // result: (Leq32U (Const32 [int64(int32(c-d))]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(c) >= uint32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int64(int32(c - d)) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d) + // result: (Less16U (Const16 [int64(int16(c-d))]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16U { + continue + } + _ = 
v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(c) >= uint16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int64(int16(c - d)) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d) + // result: (Leq16U (Const16 [int64(int16(c-d))]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(c) >= uint16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int64(int16(c - d)) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d) + // result: (Less8U (Const8 [int64( int8(c-d))]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(c) >= uint8(d)) { + continue + } + 
v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int64(int8(c - d)) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d) + // result: (Leq8U (Const8 [int64( int8(c-d))]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(c) >= uint8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int64(int8(c - d)) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = d + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // result: (Less64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, 
OpConst64, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // result: (Leq64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := v_1_1.AuxInt + if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = c - d - 1 + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = d + 1 + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // result: (Less32U (Const32 [int64(int32(c-d-1))]) (Sub32 x (Const32 [int64(int32(d+1))]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int64(int32(c - d - 1)) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int64(int32(d + 1)) + 
v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // result: (Leq32U (Const32 [int64(int32(c-d-1))]) (Sub32 x (Const32 [int64(int32(d+1))]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := v_1_1.AuxInt + if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int64(int32(c - d - 1)) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int64(int32(d + 1)) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // result: (Less16U (Const16 [int64(int16(c-d-1))]) (Sub16 x (Const16 [int64(int16(d+1))]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int64(int16(c - d - 1)) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int64(int16(d + 
1)) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // result: (Leq16U (Const16 [int64(int16(c-d-1))]) (Sub16 x (Const16 [int64(int16(d+1))]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := v_1_1.AuxInt + if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int64(int16(c - d - 1)) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int64(int16(d + 1)) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // result: (Less8U (Const8 [int64( int8(c-d-1))]) (Sub8 x (Const8 [int64( int8(d+1))]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int64(int8(c - d - 1)) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int64(int8(d + 1)) + v1.AddArg2(x, 
v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // result: (Leq8U (Const8 [int64( int8(c-d-1))]) (Sub8 x (Const8 [int64( int8(d+1))]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := v_1_1.AuxInt + if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int64(int8(c - d - 1)) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int64(int8(d + 1)) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + return false +} func rewriteValuegeneric_OpPhi(v *Value) bool { // match: (Phi (Const8 [c]) (Const8 [c])) // result: (Const8 [c]) @@ -16658,13 +18568,11 @@ func rewriteValuegeneric_OpPtrIndex(v *Value) bool { break } v.reset(OpAddPtr) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMul32, typ.Int) - v0.AddArg(idx) v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) v1.AuxInt = t.Elem().Size() - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) return true } // match: (PtrIndex ptr idx) @@ -16678,13 +18586,11 @@ func rewriteValuegeneric_OpPtrIndex(v *Value) bool { break } v.reset(OpAddPtr) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMul64, typ.Int) - v0.AddArg(idx) v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) v1.AuxInt = t.Elem().Size() - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) return true } return false @@ -16704,9 +18610,7 @@ func rewriteValuegeneric_OpRotateLeft16(v *Value) bool { if !(c%16 == 0) { break } - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16726,9 +18630,7 @@ func rewriteValuegeneric_OpRotateLeft32(v *Value) bool { if !(c%32 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16748,9 +18650,7 @@ func rewriteValuegeneric_OpRotateLeft64(v *Value) bool { if !(c%64 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16770,9 +18670,7 @@ func rewriteValuegeneric_OpRotateLeft8(v *Value) bool { if !(c%8 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16786,9 +18684,7 @@ func rewriteValuegeneric_OpRound32F(v *Value) bool { if x.Op != OpConst32F { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16802,9 +18698,7 @@ func rewriteValuegeneric_OpRound64F(v *Value) bool { if x.Op != OpConst64F { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16823,10 +18717,9 @@ func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux16 (Const16 [0]) _) @@ -16855,10 +18748,9 @@ func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux32 (Const16 [0]) _) @@ -16900,9 +18792,7 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh16Ux64 (Const16 [0]) _) @@ -16953,10 +18843,9 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { break } 
v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 [15])) @@ -16965,7 +18854,6 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { if v_0.Op != OpRsh16x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -16975,10 +18863,9 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 15 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -17013,10 +18900,9 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) @@ -17053,10 +18939,9 @@ func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux8 (Const16 [0]) _) @@ -17085,10 +18970,9 @@ func rewriteValuegeneric_OpRsh16x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x16 (Const16 [0]) _) @@ -17117,10 +19001,9 @@ func rewriteValuegeneric_OpRsh16x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x32 (Const16 [0]) _) @@ -17162,9 +19045,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh16x64 (Const16 [0]) _) @@ -17200,10 +19081,9 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { break } v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) @@ -17240,10 +19120,9 @@ func rewriteValuegeneric_OpRsh16x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x8 (Const16 [0]) _) @@ -17272,10 +19151,9 @@ func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux16 (Const32 [0]) _) @@ -17304,10 +19182,9 @@ func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux32 (Const32 [0]) _) @@ -17349,9 +19226,7 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh32Ux64 (Const32 [0]) _) @@ -17402,10 +19277,9 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 [31])) @@ -17414,7 +19288,6 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { if v_0.Op != OpRsh32x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -17424,10 +19297,9 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) 
bool { break } v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 31 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -17462,10 +19334,9 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) @@ -17520,10 +19391,9 @@ func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux8 (Const32 [0]) _) @@ -17552,10 +19422,9 @@ func rewriteValuegeneric_OpRsh32x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 (Const32 [0]) _) @@ -17584,10 +19453,9 @@ func rewriteValuegeneric_OpRsh32x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 (Const32 [0]) _) @@ -17629,9 +19497,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh32x64 (Const32 [0]) _) @@ -17667,10 +19533,9 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { break } v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) @@ -17725,10 +19590,9 @@ func rewriteValuegeneric_OpRsh32x8(v 
*Value) bool { } c := v_1.AuxInt v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 (Const32 [0]) _) @@ -17757,10 +19621,9 @@ func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux16 (Const64 [0]) _) @@ -17789,10 +19652,9 @@ func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux32 (Const64 [0]) _) @@ -17834,9 +19696,7 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh64Ux64 (Const64 [0]) _) @@ -17887,10 +19747,9 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 [63])) @@ -17899,7 +19758,6 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { if v_0.Op != OpRsh64x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -17909,10 +19767,9 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 63 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -17947,10 +19804,9 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 
+ c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) @@ -18023,10 +19879,9 @@ func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux8 (Const64 [0]) _) @@ -18055,10 +19910,9 @@ func rewriteValuegeneric_OpRsh64x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x16 (Const64 [0]) _) @@ -18087,10 +19941,9 @@ func rewriteValuegeneric_OpRsh64x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x32 (Const64 [0]) _) @@ -18132,9 +19985,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh64x64 (Const64 [0]) _) @@ -18170,10 +20021,9 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { break } v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) @@ -18246,10 +20096,9 @@ func rewriteValuegeneric_OpRsh64x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x8 (Const64 [0]) _) @@ -18278,10 +20127,9 @@ func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + 
v.AddArg2(x, v0) return true } // match: (Rsh8Ux16 (Const8 [0]) _) @@ -18310,10 +20158,9 @@ func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux32 (Const8 [0]) _) @@ -18355,9 +20202,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh8Ux64 (Const8 [0]) _) @@ -18408,10 +20253,9 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7] )) @@ -18420,7 +20264,6 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { if v_0.Op != OpRsh8x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -18430,10 +20273,9 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 7 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -18468,10 +20310,9 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -18490,10 +20331,9 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux8 (Const8 [0]) _) @@ -18522,10 +20362,9 @@ func rewriteValuegeneric_OpRsh8x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8x64) 
- v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8x16 (Const8 [0]) _) @@ -18554,10 +20393,9 @@ func rewriteValuegeneric_OpRsh8x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8x32 (Const8 [0]) _) @@ -18598,9 +20436,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh8x64 (Const8 [0]) _) @@ -18636,10 +20472,9 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { break } v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -18658,10 +20493,9 @@ func rewriteValuegeneric_OpRsh8x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8x8 (Const8 [0]) _) @@ -18691,8 +20525,7 @@ func rewriteValuegeneric_OpSelect0(v *Value) bool { } lo := v_0.Args[1] v.reset(OpDiv64u) - v.AddArg(lo) - v.AddArg(y) + v.AddArg2(lo, y) return true } return false @@ -18712,8 +20545,7 @@ func rewriteValuegeneric_OpSelect1(v *Value) bool { } lo := v_0.Args[1] v.reset(OpMod64u) - v.AddArg(lo) - v.AddArg(y) + v.AddArg2(lo, y) return true } return false @@ -18751,9 +20583,7 @@ func rewriteValuegeneric_OpSignExt16to32(v *Value) bool { if !(s >= 16) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18791,9 +20621,7 @@ func rewriteValuegeneric_OpSignExt16to64(v *Value) bool { if !(s >= 48) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18831,9 +20659,7 @@ func 
rewriteValuegeneric_OpSignExt32to64(v *Value) bool { if !(s >= 32) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18871,9 +20697,7 @@ func rewriteValuegeneric_OpSignExt8to16(v *Value) bool { if !(s >= 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18911,9 +20735,7 @@ func rewriteValuegeneric_OpSignExt8to32(v *Value) bool { if !(s >= 24) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18951,9 +20773,7 @@ func rewriteValuegeneric_OpSignExt8to64(v *Value) bool { if !(s >= 56) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -19038,7 +20858,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] + _ = v_0.Args[1] v_0_1 := v_0.Args[1] if v_0_1.Op != OpConst64 { break @@ -19056,7 +20876,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] + _ = v_0.Args[1] v_0_1 := v_0.Args[1] if v_0_1.Op != OpConst32 { break @@ -19074,7 +20894,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] + _ = v_0.Args[1] v_0_1 := v_0.Args[1] if v_0_1.Op != OpSliceLen { break @@ -19094,7 +20914,6 @@ func rewriteValuegeneric_OpSlicePtr(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] v_0_0 := v_0.Args[0] if v_0_0.Op != OpSlicePtr { break @@ -19163,12 +20982,16 @@ func rewriteValuegeneric_OpSlicemask(v *Value) bool { func rewriteValuegeneric_OpSqrt(v *Value) bool { v_0 := v.Args[0] // match: (Sqrt (Const64F [c])) + // cond: !math.IsNaN(math.Sqrt(auxTo64F(c))) // result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) for { if v_0.Op != OpConst64F { break } c := v_0.AuxInt + if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) { + break + } v.reset(OpConst64F) v.AuxInt = 
auxFrom64F(math.Sqrt(auxTo64F(c))) return true @@ -19213,9 +21036,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { v.reset(OpMove) v.AuxInt = sz v.Aux = t.(*types.Type).Elem() - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) @@ -19252,9 +21073,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { v.reset(OpMove) v.AuxInt = sz v.Aux = t.(*types.Type).Elem() - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (StaticCall {sym} x) @@ -19266,9 +21085,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { if !(needRaceCleanup(sym, v)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -19295,9 +21112,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1)) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ oldmem)) @@ -19322,9 +21137,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if oldmem != mem.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem))) @@ -19356,9 +21169,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if oldmem != mem_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem)))) @@ -19397,9 +21208,7 @@ func 
rewriteValuegeneric_OpStore(v *Value) bool { if oldmem != mem_2_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4)) && disjoint(p1, sizeof(t1), p5, sizeof(t5))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _)) @@ -19418,14 +21227,11 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p2 := mem.Args[0] if !(isConstZero(x) && o >= 0 && sizeof(t)+o <= n && isSamePtr(p1, p2)) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _))) @@ -19452,14 +21258,11 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem_2.AuxInt - _ = mem_2.Args[1] p3 := mem_2.Args[0] if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p3) && disjoint(op, sizeof(t1), p2, sizeof(t2))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _)))) @@ -19493,14 +21296,11 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem_2_2.AuxInt - _ = mem_2_2.Args[1] p4 := mem_2_2.Args[0] if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p4) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _))))) @@ -19541,14 +21341,11 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem_2_2_2.AuxInt - _ = mem_2_2_2.Args[1] p5 := mem_2_2_2.Args[0] if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, 
p5) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3)) && disjoint(op, sizeof(t1), p4, sizeof(t4))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store _ (StructMake0) mem) @@ -19558,9 +21355,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store dst (StructMake1 f0) mem) @@ -19578,9 +21373,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v0.AuxInt = 0 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f0) - v.AddArg(mem) + v.AddArg3(v0, f0, mem) return true } // match: (Store dst (StructMake2 f0 f1) mem) @@ -19599,17 +21392,13 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v0.AuxInt = t.FieldOff(1) v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t.FieldType(0) v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v2.AuxInt = 0 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(f0) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(v2, f0, mem) + v.AddArg3(v0, f1, v1) return true } // match: (Store dst (StructMake3 f0 f1 f2) mem) @@ -19629,25 +21418,19 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v0.AuxInt = t.FieldOff(2) v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t.FieldType(1) v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v2.AuxInt = t.FieldOff(1) v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(f1) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t.FieldType(0) v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v4.AuxInt = 0 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(f0) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(v4, f0, 
mem) + v1.AddArg3(v2, f1, v3) + v.AddArg3(v0, f2, v1) return true } // match: (Store dst (StructMake4 f0 f1 f2 f3) mem) @@ -19668,33 +21451,25 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) v0.AuxInt = t.FieldOff(3) v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f3) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t.FieldType(2) v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v2.AuxInt = t.FieldOff(2) v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(f2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t.FieldType(1) v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v4.AuxInt = t.FieldOff(1) v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(f1) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t.FieldType(0) v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v6.AuxInt = 0 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(f0) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(v6, f0, mem) + v3.AddArg3(v4, f1, v5) + v1.AddArg3(v2, f2, v3) + v.AddArg3(v0, f3, v1) return true } // match: (Store {t} dst (Load src mem) mem) @@ -19714,9 +21489,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.reset(OpMove) v.AuxInt = sizeof(t) v.Aux = t - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Store {t} dst (Load src mem) (VarDef {x} mem)) @@ -19740,12 +21513,10 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.reset(OpMove) v.AuxInt = sizeof(t) v.Aux = t - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg3(dst, src, v0) return true } // match: (Store _ (ArrayMake0) mem) @@ -19755,9 +21526,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store dst (ArrayMake1 e) mem) @@ -19771,9 +21540,7 @@ func 
rewriteValuegeneric_OpStore(v *Value) bool { mem := v_2 v.reset(OpStore) v.Aux = e.Type - v.AddArg(dst) - v.AddArg(e) - v.AddArg(mem) + v.AddArg3(dst, e, mem) return true } // match: (Store (Load (OffPtr [c] (SP)) mem) x mem) @@ -19797,9 +21564,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem) @@ -19827,9 +21592,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem))) @@ -19868,14 +21631,10 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(op2, d2, mem) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem)))) @@ -19927,19 +21686,13 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg3(op3, d3, mem) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } // match: 
(Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem))))) @@ -20004,24 +21757,16 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v2.Aux = t4 - v2.AddArg(op4) - v2.AddArg(d4) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg3(op4, d4, mem) + v1.AddArg3(op3, d3, v2) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem))) @@ -20060,14 +21805,10 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(op2, d2, mem) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem)))) @@ -20119,19 +21860,13 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg3(op3, d3, mem) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero 
[n] p5 mem))))) @@ -20196,24 +21931,16 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v2.Aux = t4 - v2.AddArg(op4) - v2.AddArg(d4) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg3(op4, d4, mem) + v1.AddArg3(op3, d3, v2) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } return false @@ -20248,7 +21975,6 @@ func rewriteValuegeneric_OpStringPtr(v *Value) bool { if v_0.Op != OpStringMake { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] if v_0_0.Op != OpAddr { break @@ -20275,9 +22001,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [0] (StructMake2 x _)) @@ -20286,11 +22010,8 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 0 || v_0.Op != OpStructMake2 { break } - _ = v_0.Args[1] x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [1] (StructMake2 _ x)) @@ -20300,9 +22021,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [0] (StructMake3 x _ _)) @@ -20311,11 +22030,8 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 0 || v_0.Op != OpStructMake3 { break } - _ = v_0.Args[2] x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [1] (StructMake3 _ x _)) @@ -20324,11 +22040,8 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 1 || v_0.Op != OpStructMake3 { 
break } - _ = v_0.Args[2] x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [2] (StructMake3 _ _ x)) @@ -20338,9 +22051,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [0] (StructMake4 x _ _ _)) @@ -20349,11 +22060,8 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 0 || v_0.Op != OpStructMake4 { break } - _ = v_0.Args[3] x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [1] (StructMake4 _ x _ _)) @@ -20362,11 +22070,8 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 1 || v_0.Op != OpStructMake4 { break } - _ = v_0.Args[3] x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [2] (StructMake4 _ _ x _)) @@ -20375,11 +22080,8 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 2 || v_0.Op != OpStructMake4 { break } - _ = v_0.Args[3] x := v_0.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [3] (StructMake4 _ _ _ x)) @@ -20389,9 +22091,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[3] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [i] x:(Load ptr mem)) @@ -20411,13 +22111,11 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpLoad, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo()) v1.AuxInt = t.FieldOff(int(i)) v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (StructSelect [0] (IData x)) @@ -20468,8 +22166,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { 
v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(-c)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub16 (Mul16 x y) (Mul16 x z)) @@ -20497,11 +22194,9 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { } z := v_1_1 v.reset(OpMul16) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -20533,9 +22228,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -20555,9 +22248,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -20581,10 +22272,8 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { } v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub16 x (Sub16 z i:(Const16 ))) @@ -20606,11 +22295,9 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { break } v.reset(OpAdd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub16 (Const16 [c]) (Sub16 x (Const16 [d]))) @@ -20634,8 +22321,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub16 (Const16 [c]) (Sub16 (Const16 [d]) x)) @@ -20658,8 +22344,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -20699,8 +22384,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { 
v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(-c)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub32 (Mul32 x y) (Mul32 x z)) @@ -20728,11 +22412,9 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { } z := v_1_1 v.reset(OpMul32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -20764,9 +22446,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -20786,9 +22466,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -20812,10 +22490,8 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { } v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub32 x (Sub32 z i:(Const32 ))) @@ -20837,11 +22513,9 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { break } v.reset(OpAdd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub32 (Const32 [c]) (Sub32 x (Const32 [d]))) @@ -20865,8 +22539,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub32 (Const32 [c]) (Sub32 (Const32 [d]) x)) @@ -20889,8 +22562,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -20950,8 +22622,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { 
v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = -c - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub64 (Mul64 x y) (Mul64 x z)) @@ -20979,11 +22650,9 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { } z := v_1_1 v.reset(OpMul64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -21015,9 +22684,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -21037,9 +22704,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -21063,10 +22728,8 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { } v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub64 x (Sub64 z i:(Const64 ))) @@ -21088,11 +22751,9 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { break } v.reset(OpAdd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub64 (Const64 [c]) (Sub64 x (Const64 [d]))) @@ -21116,8 +22777,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub64 (Const64 [c]) (Sub64 (Const64 [d]) x)) @@ -21140,8 +22800,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -21201,8 +22860,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, 
t) v0.AuxInt = int64(int8(-c)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub8 (Mul8 x y) (Mul8 x z)) @@ -21230,11 +22888,9 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { } z := v_1_1 v.reset(OpMul8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -21266,9 +22922,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -21288,9 +22942,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -21314,10 +22966,8 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { } v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub8 x (Sub8 z i:(Const8 ))) @@ -21339,11 +22989,9 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { break } v.reset(OpAdd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub8 (Const8 [c]) (Sub8 x (Const8 [d]))) @@ -21367,8 +23015,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) @@ -21391,8 +23038,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -21417,9 +23063,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) 
return true } // match: (Trunc16to8 (SignExt8to16 x)) @@ -21429,9 +23073,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc16to8 (And16 (Const16 [y]) x)) @@ -21492,9 +23134,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to16 (SignExt8to32 x)) @@ -21515,9 +23155,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to16 (And32 (Const32 [y]) x)) @@ -21567,9 +23205,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to8 (SignExt8to32 x)) @@ -21579,9 +23215,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to8 (And32 (Const32 [y]) x)) @@ -21642,9 +23276,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to16 (SignExt8to64 x)) @@ -21665,9 +23297,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to16 (And64 (Const64 [y]) x)) @@ -21739,9 +23369,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to32 (SignExt8to64 x)) @@ -21773,9 +23401,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type 
= x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to32 (And64 (Const64 [y]) x)) @@ -21825,9 +23451,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to8 (SignExt8to64 x)) @@ -21837,9 +23461,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to8 (And64 (Const64 [y]) x)) @@ -21910,9 +23532,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -21933,9 +23553,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -21964,11 +23582,9 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { continue } v.reset(OpXor16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -21998,8 +23614,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { v.reset(OpXor16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22048,9 +23663,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -22071,9 +23684,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -22102,11 +23713,9 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { continue } v.reset(OpXor32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ 
-22136,8 +23745,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { v.reset(OpXor32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22186,9 +23794,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -22209,9 +23815,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -22240,11 +23844,9 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { continue } v.reset(OpXor64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -22274,8 +23876,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { v.reset(OpXor64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c ^ d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22324,9 +23925,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -22347,9 +23946,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -22378,11 +23975,9 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { continue } v.reset(OpXor8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -22412,8 +24007,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.reset(OpXor8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22443,9 +24037,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == 
OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) @@ -22473,8 +24065,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t1 - v.AddArg(p1) - v.AddArg(mem) + v.AddArg2(p1, mem) return true } // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) @@ -22496,8 +24087,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(mem) + v.AddArg2(dst1, mem) return true } // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) @@ -22524,11 +24114,10 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg2(dst1, v0) return true } return false @@ -22566,9 +24155,7 @@ func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool { if !(s >= 16) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22606,9 +24193,7 @@ func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool { if !(s >= 48) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22646,9 +24231,7 @@ func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool { if !(s >= 32) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22686,9 +24269,7 @@ func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool { if !(s >= 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22726,9 +24307,7 @@ func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool { if !(s >= 24) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + 
v.copyOf(x) return true } return false @@ -22766,9 +24345,7 @@ func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool { if !(s >= 56) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22781,8 +24358,7 @@ func rewriteBlockgeneric(b *Block) bool { for b.Controls[0].Op == OpNot { v_0 := b.Controls[0] cond := v_0.Args[0] - b.Reset(BlockIf) - b.AddControl(cond) + b.resetWithControl(BlockIf, cond) b.swapSuccessors() return true } diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index b877220211..e5246779fc 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -218,6 +218,58 @@ func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) w.Uses++ } + +//go:noinline +func (v *Value) AddArg2(w1, w2 *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w1, w2) + w1.Uses++ + w2.Uses++ +} + +//go:noinline +func (v *Value) AddArg3(w1, w2, w3 *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w1, w2, w3) + w1.Uses++ + w2.Uses++ + w3.Uses++ +} + +//go:noinline +func (v *Value) AddArg4(w1, w2, w3, w4 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ +} + +//go:noinline +func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4, w5) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ + w5.Uses++ +} + +//go:noinline +func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4, w5, w6) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ + w5.Uses++ + w6.Uses++ +} + func (v *Value) AddArgs(a ...*Value) { if v.Args == nil { v.resetArgs() // use argstorage @@ -258,17 +310,29 @@ func (v *Value) resetArgs() { v.Args = v.argstorage[:0] } +// reset is called from most rewrite rules. 
+// Allowing it to be inlined increases the size +// of cmd/compile by almost 10%, and slows it down. +//go:noinline func (v *Value) reset(op Op) { v.Op = op - if op != OpCopy && notStmtBoundary(op) { - // Special case for OpCopy because of how it is used in rewrite - v.Pos = v.Pos.WithNotStmt() - } v.resetArgs() v.AuxInt = 0 v.Aux = nil } +// copyOf is called from rewrite rules. +// It modifies v to be (Copy a). +//go:noinline +func (v *Value) copyOf(a *Value) { + v.Op = OpCopy + v.resetArgs() + v.AddArg(a) + v.AuxInt = 0 + v.Aux = nil + v.Type = a.Type +} + // copyInto makes a new value identical to v and adds it to the end of b. // unlike copyIntoWithXPos this does not check for v.Pos being a statement. func (v *Value) copyInto(b *Block) *Value { diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index d246fb333c..cebfbb8c9d 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -347,6 +347,7 @@ func writebarrier(f *Func) { bEnd.Values = append(bEnd.Values, last) last.Block = bEnd last.reset(OpPhi) + last.Pos = last.Pos.WithNotStmt() last.Type = types.TypeMem last.AddArg(memThen) last.AddArg(memElse) diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index 469d9ad69b..5e52800b39 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -419,7 +419,7 @@ func (p *parser) fileOrNil() *File { } // p.tok == _EOF - f.Lines = p.source.line + f.Lines = p.line return f } diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go index 673339d667..81945faee9 100644 --- a/src/cmd/compile/internal/syntax/parser_test.go +++ b/src/cmd/compile/internal/syntax/parser_test.go @@ -10,6 +10,7 @@ import ( "fmt" "io/ioutil" "path/filepath" + "regexp" "runtime" "strings" "sync" @@ -17,9 +18,12 @@ import ( "time" ) -var fast = 
flag.Bool("fast", false, "parse package files in parallel") -var src_ = flag.String("src", "parser.go", "source file to parse") -var verify = flag.Bool("verify", false, "verify idempotent printing") +var ( + fast = flag.Bool("fast", false, "parse package files in parallel") + verify = flag.Bool("verify", false, "verify idempotent printing") + src_ = flag.String("src", "parser.go", "source file to parse") + skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib") +) func TestParse(t *testing.T) { ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0) @@ -30,6 +34,15 @@ func TestStdLib(t *testing.T) { t.Skip("skipping test in short mode") } + var skipRx *regexp.Regexp + if *skip != "" { + var err error + skipRx, err = regexp.Compile(*skip) + if err != nil { + t.Fatalf("invalid argument for -skip (%v)", err) + } + } + var m1 runtime.MemStats runtime.ReadMemStats(&m1) start := time.Now() @@ -46,6 +59,12 @@ func TestStdLib(t *testing.T) { runtime.GOROOT(), } { walkDirs(t, dir, func(filename string) { + if skipRx != nil && skipRx.MatchString(filename) { + // Always report skipped files since regexp + // typos can lead to surprising results. + fmt.Printf("skipping %s\n", filename) + return + } if debug { fmt.Printf("parsing %s\n", filename) } diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go index fef87171bc..fc2efcced2 100644 --- a/src/cmd/compile/internal/syntax/scanner.go +++ b/src/cmd/compile/internal/syntax/scanner.go @@ -6,9 +6,9 @@ // Go source. After initialization, consecutive calls of // next advance the scanner one token at a time. // -// This file, source.go, and tokens.go are self-contained -// (go tool compile scanner.go source.go tokens.go compiles) -// and thus could be made into its own package. 
+// This file, source.go, tokens.go, and token_string.go are self-contained +// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles) +// and thus could be made into their own package. package syntax @@ -86,20 +86,21 @@ func (s *scanner) next() { redo: // skip white space - c := s.getr() - for c == ' ' || c == '\t' || c == '\n' && !nlsemi || c == '\r' { - c = s.getr() + s.stop() + for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' { + s.nextch() } // token start - s.line, s.col = s.source.line0, s.source.col0 - - if isLetter(c) || c >= utf8.RuneSelf && s.isIdentRune(c, true) { + s.line, s.col = s.pos() + s.start() + if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) { + s.nextch() s.ident() return } - switch c { + switch s.ch { case -1: if nlsemi { s.lit = "EOF" @@ -109,11 +110,12 @@ redo: s.tok = _EOF case '\n': + s.nextch() s.lit = "newline" s.tok = _Semi case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - s.number(c) + s.number(false) case '"': s.stdString() @@ -125,97 +127,110 @@ redo: s.rune() case '(': + s.nextch() s.tok = _Lparen case '[': + s.nextch() s.tok = _Lbrack case '{': + s.nextch() s.tok = _Lbrace case ',': + s.nextch() s.tok = _Comma case ';': + s.nextch() s.lit = "semicolon" s.tok = _Semi case ')': + s.nextch() s.nlsemi = true s.tok = _Rparen case ']': + s.nextch() s.nlsemi = true s.tok = _Rbrack case '}': + s.nextch() s.nlsemi = true s.tok = _Rbrace case ':': - if s.getr() == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.tok = _Define break } - s.ungetr() s.tok = _Colon case '.': - c = s.getr() - if isDecimal(c) { - s.ungetr() - s.unread(1) // correct position of '.' (needed by startLit in number) - s.number('.') + s.nextch() + if isDecimal(s.ch) { + s.number(true) break } - if c == '.' { - c = s.getr() - if c == '.' { + if s.ch == '.' { + s.nextch() + if s.ch == '.' { + s.nextch() s.tok = _DotDotDot break } - s.unread(1) + s.rewind() // now s.ch holds 1st '.' 
+ s.nextch() // consume 1st '.' again } - s.ungetr() s.tok = _Dot case '+': + s.nextch() s.op, s.prec = Add, precAdd - c = s.getr() - if c != '+' { + if s.ch != '+' { goto assignop } + s.nextch() s.nlsemi = true s.tok = _IncOp case '-': + s.nextch() s.op, s.prec = Sub, precAdd - c = s.getr() - if c != '-' { + if s.ch != '-' { goto assignop } + s.nextch() s.nlsemi = true s.tok = _IncOp case '*': + s.nextch() s.op, s.prec = Mul, precMul // don't goto assignop - want _Star token - if s.getr() == '=' { + if s.ch == '=' { + s.nextch() s.tok = _AssignOp break } - s.ungetr() s.tok = _Star case '/': - c = s.getr() - if c == '/' { + s.nextch() + if s.ch == '/' { + s.nextch() s.lineComment() goto redo } - if c == '*' { + if s.ch == '*' { + s.nextch() s.fullComment() - if s.source.line > s.line && nlsemi { + if line, _ := s.pos(); line > s.line && nlsemi { // A multi-line comment acts like a newline; // it translates to a ';' if nlsemi is set. s.lit = "newline" @@ -228,27 +243,29 @@ redo: goto assignop case '%': + s.nextch() s.op, s.prec = Rem, precMul - c = s.getr() goto assignop case '&': - c = s.getr() - if c == '&' { + s.nextch() + if s.ch == '&' { + s.nextch() s.op, s.prec = AndAnd, precAndAnd s.tok = _Operator break } s.op, s.prec = And, precMul - if c == '^' { + if s.ch == '^' { + s.nextch() s.op = AndNot - c = s.getr() } goto assignop case '|': - c = s.getr() - if c == '|' { + s.nextch() + if s.ch == '|' { + s.nextch() s.op, s.prec = OrOr, precOrOr s.tok = _Operator break @@ -257,106 +274,100 @@ redo: goto assignop case '^': + s.nextch() s.op, s.prec = Xor, precAdd - c = s.getr() goto assignop case '<': - c = s.getr() - if c == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Leq, precCmp s.tok = _Operator break } - if c == '<' { + if s.ch == '<' { + s.nextch() s.op, s.prec = Shl, precMul - c = s.getr() goto assignop } - if c == '-' { + if s.ch == '-' { + s.nextch() s.tok = _Arrow break } - s.ungetr() s.op, s.prec = Lss, precCmp s.tok = _Operator case 
'>': - c = s.getr() - if c == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Geq, precCmp s.tok = _Operator break } - if c == '>' { + if s.ch == '>' { + s.nextch() s.op, s.prec = Shr, precMul - c = s.getr() goto assignop } - s.ungetr() s.op, s.prec = Gtr, precCmp s.tok = _Operator case '=': - if s.getr() == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Eql, precCmp s.tok = _Operator break } - s.ungetr() s.tok = _Assign case '!': - if s.getr() == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Neq, precCmp s.tok = _Operator break } - s.ungetr() s.op, s.prec = Not, 0 s.tok = _Operator default: - s.tok = 0 - s.errorf("invalid character %#U", c) + s.errorf("invalid character %#U", s.ch) + s.nextch() goto redo } return assignop: - if c == '=' { + if s.ch == '=' { + s.nextch() s.tok = _AssignOp return } - s.ungetr() s.tok = _Operator } -func isLetter(c rune) bool { - return 'a' <= lower(c) && lower(c) <= 'z' || c == '_' -} - func (s *scanner) ident() { - s.startLit() - // accelerate common case (7bit ASCII) - c := s.getr() - for isLetter(c) || isDecimal(c) { - c = s.getr() + for isLetter(s.ch) || isDecimal(s.ch) { + s.nextch() } // general case - if c >= utf8.RuneSelf { - for s.isIdentRune(c, false) { - c = s.getr() + if s.ch >= utf8.RuneSelf { + for s.atIdentChar(false) { + s.nextch() } } - s.ungetr() - - lit := s.stopLit() // possibly a keyword + lit := s.segment() if len(lit) >= 2 { if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) { s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok) @@ -376,16 +387,16 @@ func tokStrFast(tok token) string { return _token_name[_token_index[tok-1]:_token_index[tok]] } -func (s *scanner) isIdentRune(c rune, first bool) bool { +func (s *scanner) atIdentChar(first bool) bool { switch { - case unicode.IsLetter(c) || c == '_': + case unicode.IsLetter(s.ch) || s.ch == '_': // ok - case unicode.IsDigit(c): + case unicode.IsDigit(s.ch): 
if first { - s.errorf("identifier cannot begin with digit %#U", c) + s.errorf("identifier cannot begin with digit %#U", s.ch) } - case c >= utf8.RuneSelf: - s.errorf("invalid identifier character %#U", c) + case s.ch >= utf8.RuneSelf: + s.errorf("invalid character %#U in identifier", s.ch) default: return false } @@ -411,46 +422,45 @@ func init() { } } -func lower(c rune) rune { return ('a' - 'A') | c } // returns lower-case c iff c is ASCII letter -func isDecimal(c rune) bool { return '0' <= c && c <= '9' } -func isHex(c rune) bool { return '0' <= c && c <= '9' || 'a' <= lower(c) && lower(c) <= 'f' } +func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter +func isLetter(ch rune) bool { return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' } +func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } +func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' } -// digits accepts the sequence { digit | '_' } starting with c0. +// digits accepts the sequence { digit | '_' }. // If base <= 10, digits accepts any decimal digit but records // the index (relative to the literal start) of a digit >= base // in *invalid, if *invalid < 0. -// digits returns the first rune that is not part of the sequence -// anymore, and a bitset describing whether the sequence contained +// digits returns a bitset describing whether the sequence contained // digits (bit 0 is set), or separators '_' (bit 1 is set). 
-func (s *scanner) digits(c0 rune, base int, invalid *int) (c rune, digsep int) { - c = c0 +func (s *scanner) digits(base int, invalid *int) (digsep int) { if base <= 10 { max := rune('0' + base) - for isDecimal(c) || c == '_' { + for isDecimal(s.ch) || s.ch == '_' { ds := 1 - if c == '_' { + if s.ch == '_' { ds = 2 - } else if c >= max && *invalid < 0 { - *invalid = int(s.col0 - s.col) // record invalid rune index + } else if s.ch >= max && *invalid < 0 { + _, col := s.pos() + *invalid = int(col - s.col) // record invalid rune index } digsep |= ds - c = s.getr() + s.nextch() } } else { - for isHex(c) || c == '_' { + for isHex(s.ch) || s.ch == '_' { ds := 1 - if c == '_' { + if s.ch == '_' { ds = 2 } digsep |= ds - c = s.getr() + s.nextch() } } return } -func (s *scanner) number(c rune) { - s.startLit() +func (s *scanner) number(seenPoint bool) { s.bad = false base := 10 // number base @@ -459,38 +469,39 @@ func (s *scanner) number(c rune) { invalid := -1 // index of invalid digit in literal, or < 0 // integer part - var ds int - if c != '.' { + if !seenPoint { s.kind = IntLit - if c == '0' { - c = s.getr() - switch lower(c) { + if s.ch == '0' { + s.nextch() + switch lower(s.ch) { case 'x': - c = s.getr() + s.nextch() base, prefix = 16, 'x' case 'o': - c = s.getr() + s.nextch() base, prefix = 8, 'o' case 'b': - c = s.getr() + s.nextch() base, prefix = 2, 'b' default: base, prefix = 8, '0' digsep = 1 // leading 0 } } - c, ds = s.digits(c, base, &invalid) - digsep |= ds + digsep |= s.digits(base, &invalid) + if s.ch == '.' { + if prefix == 'o' || prefix == 'b' { + s.errorf("invalid radix point in %s", litname(prefix)) + } + s.nextch() + seenPoint = true + } } // fractional part - if c == '.' 
{ + if seenPoint { s.kind = FloatLit - if prefix == 'o' || prefix == 'b' { - s.errorf("invalid radix point in %s", litname(prefix)) - } - c, ds = s.digits(s.getr(), base, &invalid) - digsep |= ds + digsep |= s.digits(base, &invalid) } if digsep&1 == 0 && !s.bad { @@ -498,23 +509,22 @@ func (s *scanner) number(c rune) { } // exponent - if e := lower(c); e == 'e' || e == 'p' { + if e := lower(s.ch); e == 'e' || e == 'p' { if !s.bad { switch { case e == 'e' && prefix != 0 && prefix != '0': - s.errorf("%q exponent requires decimal mantissa", c) + s.errorf("%q exponent requires decimal mantissa", s.ch) case e == 'p' && prefix != 'x': - s.errorf("%q exponent requires hexadecimal mantissa", c) + s.errorf("%q exponent requires hexadecimal mantissa", s.ch) } } - c = s.getr() + s.nextch() s.kind = FloatLit - if c == '+' || c == '-' { - c = s.getr() + if s.ch == '+' || s.ch == '-' { + s.nextch() } - c, ds = s.digits(c, 10, nil) - digsep |= ds - if ds&1 == 0 && !s.bad { + digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit + if digsep&1 == 0 && !s.bad { s.errorf("exponent has no digits") } } else if prefix == 'x' && s.kind == FloatLit && !s.bad { @@ -522,14 +532,13 @@ func (s *scanner) number(c rune) { } // suffix 'i' - if c == 'i' { + if s.ch == 'i' { s.kind = ImagLit - c = s.getr() + s.nextch() } - s.ungetr() s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.tok = _Literal if s.kind == IntLit && invalid >= 0 && !s.bad { @@ -596,199 +605,195 @@ func invalidSep(x string) int { } func (s *scanner) rune() { - s.startLit() s.bad = false + s.nextch() n := 0 for ; ; n++ { - r := s.getr() - if r == '\'' { + if s.ch == '\'' { + if !s.bad { + if n == 0 { + s.errorf("empty rune literal or unescaped '") + } else if n != 1 { + s.errorAtf(0, "more than one character in rune literal") + } + } + s.nextch() break } - if r == '\\' { + if s.ch == '\\' { + s.nextch() s.escape('\'') continue } - if r == '\n' { - s.ungetr() // assume newline is not part of 
literal + if s.ch == '\n' { if !s.bad { - s.errorf("newline in character literal") + s.errorf("newline in rune literal") } break } - if r < 0 { + if s.ch < 0 { if !s.bad { - s.errorAtf(0, "invalid character literal (missing closing ')") + s.errorAtf(0, "rune literal not terminated") } break } - } - - if !s.bad { - if n == 0 { - s.errorf("empty character literal or unescaped ' in character literal") - } else if n != 1 { - s.errorAtf(0, "invalid character literal (more than one character)") - } + s.nextch() } s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.kind = RuneLit s.tok = _Literal } func (s *scanner) stdString() { - s.startLit() s.bad = false + s.nextch() for { - r := s.getr() - if r == '"' { + if s.ch == '"' { + s.nextch() break } - if r == '\\' { + if s.ch == '\\' { + s.nextch() s.escape('"') continue } - if r == '\n' { - s.ungetr() // assume newline is not part of literal + if s.ch == '\n' { s.errorf("newline in string") break } - if r < 0 { + if s.ch < 0 { s.errorAtf(0, "string not terminated") break } + s.nextch() } s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.kind = StringLit s.tok = _Literal } func (s *scanner) rawString() { - s.startLit() s.bad = false + s.nextch() for { - r := s.getr() - if r == '`' { + if s.ch == '`' { + s.nextch() break } - if r < 0 { + if s.ch < 0 { s.errorAtf(0, "string not terminated") break } + s.nextch() } // We leave CRs in the string since they are part of the // literal (even though they are not part of the literal // value). 
s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.kind = StringLit s.tok = _Literal } func (s *scanner) comment(text string) { - s.errh(s.line, s.col, text) + s.errorAtf(0, "%s", text) } -func (s *scanner) skipLine(r rune) { - for r >= 0 { - if r == '\n' { - s.ungetr() // don't consume '\n' - needed for nlsemi logic - break - } - r = s.getr() +func (s *scanner) skipLine() { + // don't consume '\n' - needed for nlsemi logic + for s.ch >= 0 && s.ch != '\n' { + s.nextch() } } func (s *scanner) lineComment() { - r := s.getr() + // opening has already been consumed if s.mode&comments != 0 { - s.startLit() - s.skipLine(r) - s.comment("//" + string(s.stopLit())) + s.skipLine() + s.comment(string(s.segment())) return } // directives must start at the beginning of the line (s.col == colbase) - if s.mode&directives == 0 || s.col != colbase || (r != 'g' && r != 'l') { - s.skipLine(r) + if s.mode&directives == 0 || s.col != colbase || (s.ch != 'g' && s.ch != 'l') { + s.stop() + s.skipLine() return } // recognize go: or line directives prefix := "go:" - if r == 'l' { + if s.ch == 'l' { prefix = "line " } for _, m := range prefix { - if r != m { - s.skipLine(r) + if s.ch != m { + s.stop() + s.skipLine() return } - r = s.getr() + s.nextch() } // directive text - s.startLit() - s.skipLine(r) - s.comment("//" + prefix + string(s.stopLit())) + s.skipLine() + s.comment(string(s.segment())) } -func (s *scanner) skipComment(r rune) bool { - for r >= 0 { - for r == '*' { - r = s.getr() - if r == '/' { +func (s *scanner) skipComment() bool { + for s.ch >= 0 { + for s.ch == '*' { + s.nextch() + if s.ch == '/' { + s.nextch() return true } } - r = s.getr() + s.nextch() } s.errorAtf(0, "comment not terminated") return false } func (s *scanner) fullComment() { - r := s.getr() + /* opening has already been consumed */ if s.mode&comments != 0 { - s.startLit() - if s.skipComment(r) { - s.comment("/*" + string(s.stopLit())) - } else { - s.killLit() // not a complete 
comment - ignore + if s.skipComment() { + s.comment(string(s.segment())) } return } - if s.mode&directives == 0 || r != 'l' { - s.skipComment(r) + if s.mode&directives == 0 || s.ch != 'l' { + s.stop() + s.skipComment() return } // recognize line directive const prefix = "line " for _, m := range prefix { - if r != m { - s.skipComment(r) + if s.ch != m { + s.stop() + s.skipComment() return } - r = s.getr() + s.nextch() } // directive text - s.startLit() - if s.skipComment(r) { - s.comment("/*" + prefix + string(s.stopLit())) - } else { - s.killLit() // not a complete comment - ignore + if s.skipComment() { + s.comment(string(s.segment())) } } @@ -796,62 +801,59 @@ func (s *scanner) escape(quote rune) { var n int var base, max uint32 - c := s.getr() - switch c { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + switch s.ch { + case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\': + s.nextch() return case '0', '1', '2', '3', '4', '5', '6', '7': n, base, max = 3, 8, 255 case 'x': - c = s.getr() + s.nextch() n, base, max = 2, 16, 255 case 'u': - c = s.getr() + s.nextch() n, base, max = 4, 16, unicode.MaxRune case 'U': - c = s.getr() + s.nextch() n, base, max = 8, 16, unicode.MaxRune default: - if c < 0 { + if s.ch < 0 { return // complain in caller about EOF } - s.errorf("unknown escape sequence") + s.errorf("unknown escape") return } var x uint32 for i := n; i > 0; i-- { + if s.ch < 0 { + return // complain in caller about EOF + } d := base - switch { - case isDecimal(c): - d = uint32(c) - '0' - case 'a' <= lower(c) && lower(c) <= 'f': - d = uint32(lower(c)) - ('a' - 10) + if isDecimal(s.ch) { + d = uint32(s.ch) - '0' + } else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' { + d = uint32(lower(s.ch)) - 'a' + 10 } if d >= base { - if c < 0 { - return // complain in caller about EOF - } kind := "hex" if base == 8 { kind = "octal" } - s.errorf("non-%s character in escape sequence: %c", kind, c) - s.ungetr() + s.errorf("invalid character %q in %s escape", s.ch, kind) 
return } // d < base x = x*base + d - c = s.getr() + s.nextch() } - s.ungetr() if x > max && base == 8 { - s.errorf("octal escape value > 255: %d", x) + s.errorf("octal escape value %d > 255", x) return } if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ { - s.errorf("escape sequence is invalid Unicode code point %#U", x) + s.errorf("escape is invalid Unicode code point %#U", x) } } diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index d76231a4af..78e470c45c 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -12,19 +12,59 @@ import ( "testing" ) +// errh is a default error handler for basic tests. +func errh(line, col uint, msg string) { + panic(fmt.Sprintf("%d:%d: %s", line, col, msg)) +} + +// Don't bother with other tests if TestSmoke doesn't pass. +func TestSmoke(t *testing.T) { + const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$" + tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF} + + var got scanner + got.init(strings.NewReader(src), errh, 0) + for _, want := range tokens { + got.next() + if got.tok != want { + t.Errorf("%d:%d: got %s; want %s", got.line, got.col, got.tok, want) + continue + } + } +} + +// Once TestSmoke passes, run TestTokens next. 
+func TestTokens(t *testing.T) { + var got scanner + for _, want := range sampleTokens { + got.init(strings.NewReader(want.src), func(line, col uint, msg string) { + t.Errorf("%s:%d:%d: %s", want.src, line, col, msg) + }, 0) + got.next() + if got.tok != want.tok { + t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok) + continue + } + if (got.tok == _Name || got.tok == _Literal) && got.lit != want.src { + t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src) + } + } +} + func TestScanner(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } - src, err := os.Open("parser.go") + filename := *src_ // can be changed via -src flag + src, err := os.Open(filename) if err != nil { t.Fatal(err) } defer src.Close() var s scanner - s.init(src, nil, 0) + s.init(src, errh, 0) for { s.next() if s.tok == _EOF { @@ -34,64 +74,66 @@ func TestScanner(t *testing.T) { continue } switch s.tok { - case _Name: - fmt.Println(s.line, s.tok, "=>", s.lit) + case _Name, _Literal: + fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit) case _Operator: - fmt.Println(s.line, s.tok, "=>", s.op, s.prec) + fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec) default: - fmt.Println(s.line, s.tok) + fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok) } } } -func TestTokens(t *testing.T) { +func TestEmbeddedTokens(t *testing.T) { // make source var buf bytes.Buffer for i, s := range sampleTokens { - buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation - buf.WriteString(s.src) // token - buf.WriteString(" "[:i&7]) // trailing spaces - fmt.Fprintf(&buf, "/*line foo:%d */ // bar\n", i+linebase) // comments (don't crash w/o directive handler) + buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation + buf.WriteString(s.src) // token + buf.WriteString(" "[:i&7]) // trailing spaces + buf.WriteString(fmt.Sprintf("/*line foo:%d */ // bar\n", i)) // comments + newline (don't crash w/o directive 
handler) } // scan source var got scanner + var src string got.init(&buf, func(line, col uint, msg string) { - t.Fatalf("%d:%d: %s", line, col, msg) + t.Fatalf("%s:%d:%d: %s", src, line, col, msg) }, 0) got.next() for i, want := range sampleTokens { + src = want.src nlsemi := false - if got.line != uint(i+linebase) { - t.Errorf("got line %d; want %d", got.line, i+linebase) + if got.line-linebase != uint(i) { + t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i) } if got.tok != want.tok { - t.Errorf("got tok = %s; want %s", got.tok, want.tok) + t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok) continue } switch want.tok { case _Semi: if got.lit != "semicolon" { - t.Errorf("got %s; want semicolon", got.lit) + t.Errorf("%s: got %s; want semicolon", src, got.lit) } case _Name, _Literal: if got.lit != want.src { - t.Errorf("got lit = %q; want %q", got.lit, want.src) + t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src) continue } nlsemi = true case _Operator, _AssignOp, _IncOp: if got.op != want.op { - t.Errorf("got op = %s; want %s", got.op, want.op) + t.Errorf("%s: got op %s; want %s", src, got.op, want.op) continue } if got.prec != want.prec { - t.Errorf("got prec = %d; want %d", got.prec, want.prec) + t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec) continue } nlsemi = want.tok == _IncOp @@ -103,11 +145,11 @@ func TestTokens(t *testing.T) { if nlsemi { got.next() if got.tok != _Semi { - t.Errorf("got tok = %s; want ;", got.tok) + t.Errorf("%s: got tok %s; want ;", src, got.tok) continue } if got.lit != "newline" { - t.Errorf("got %s; want newline", got.lit) + t.Errorf("%s: got %s; want newline", src, got.lit) } } @@ -299,7 +341,7 @@ func TestComments(t *testing.T) { {"//", comment{0, 0, "//"}}, /*-style comments */ - {"/* regular comment */", comment{0, 0, "/* regular comment */"}}, + {"123/* regular comment */", comment{0, 3, "/* regular comment */"}}, {"package p /* regular comment", comment{0, 0, ""}}, {"\n\n\n/*\n*//* 
want this one */", comment{4, 2, "/* want this one */"}}, {"\n\n/**/", comment{2, 0, "/**/"}}, @@ -307,17 +349,16 @@ func TestComments(t *testing.T) { } { var s scanner var got comment - s.init(strings.NewReader(test.src), - func(line, col uint, msg string) { - if msg[0] != '/' { - // error - if msg != "comment not terminated" { - t.Errorf("%q: %s", test.src, msg) - } - return + s.init(strings.NewReader(test.src), func(line, col uint, msg string) { + if msg[0] != '/' { + // error + if msg != "comment not terminated" { + t.Errorf("%q: %s", test.src, msg) } - got = comment{line - linebase, col - colbase, msg} // keep last one - }, comments) + return + } + got = comment{line - linebase, col - colbase, msg} // keep last one + }, comments) for { s.next() @@ -542,7 +583,7 @@ func TestNumbers(t *testing.T) { func TestScanErrors(t *testing.T) { for _, test := range []struct { - src, msg string + src, err string line, col uint // 0-based }{ // Note: Positions for lexical errors are the earliest position @@ -555,10 +596,10 @@ func TestScanErrors(t *testing.T) { {"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0}, // token-level errors - {"\u00BD" /* ½ */, "invalid identifier character U+00BD '½'", 0, 0}, - {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid identifier character U+00BD '½'", 0, 13 /* byte offset */}, + {"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0}, + {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */}, {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0}, - {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid identifier character U+00BD '½'", 0, 8 /* byte offset */}, + {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */}, {"x + ~y", "invalid character U+007E '~'", 0, 4}, {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3}, @@ -567,22 +608,22 @@ func TestScanErrors(t *testing.T) { {"0123456789e0 
/*\nfoobar", "comment not terminated", 0, 13}, // valid float constant {"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12}, - {`''`, "empty character literal or unescaped ' in character literal", 0, 1}, - {"'\n", "newline in character literal", 0, 1}, - {`'\`, "invalid character literal (missing closing ')", 0, 0}, - {`'\'`, "invalid character literal (missing closing ')", 0, 0}, - {`'\x`, "invalid character literal (missing closing ')", 0, 0}, - {`'\x'`, "non-hex character in escape sequence: '", 0, 3}, - {`'\y'`, "unknown escape sequence", 0, 2}, - {`'\x0'`, "non-hex character in escape sequence: '", 0, 4}, - {`'\00'`, "non-octal character in escape sequence: '", 0, 4}, + {`''`, "empty rune literal or unescaped '", 0, 1}, + {"'\n", "newline in rune literal", 0, 1}, + {`'\`, "rune literal not terminated", 0, 0}, + {`'\'`, "rune literal not terminated", 0, 0}, + {`'\x`, "rune literal not terminated", 0, 0}, + {`'\x'`, "invalid character '\\'' in hex escape", 0, 3}, + {`'\y'`, "unknown escape", 0, 2}, + {`'\x0'`, "invalid character '\\'' in hex escape", 0, 4}, + {`'\00'`, "invalid character '\\'' in octal escape", 0, 4}, {`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape - {`'\378`, "non-octal character in escape sequence: 8", 0, 4}, - {`'\400'`, "octal escape value > 255: 256", 0, 5}, - {`'xx`, "invalid character literal (missing closing ')", 0, 0}, - {`'xx'`, "invalid character literal (more than one character)", 0, 0}, + {`'\378`, "invalid character '8' in octal escape", 0, 4}, + {`'\400'`, "octal escape value 256 > 255", 0, 5}, + {`'xx`, "rune literal not terminated", 0, 0}, + {`'xx'`, "more than one character in rune literal", 0, 0}, - {"\"\n", "newline in string", 0, 1}, + {"\n \"foo\n", "newline in string", 1, 7}, {`"`, "string not terminated", 0, 0}, {`"foo`, "string not terminated", 0, 0}, {"`", "string not terminated", 0, 0}, @@ -592,42 +633,34 @@ func TestScanErrors(t *testing.T) { {`"\`, "string not terminated", 0, 0}, 
{`"\"`, "string not terminated", 0, 0}, {`"\x`, "string not terminated", 0, 0}, - {`"\x"`, "non-hex character in escape sequence: \"", 0, 3}, - {`"\y"`, "unknown escape sequence", 0, 2}, - {`"\x0"`, "non-hex character in escape sequence: \"", 0, 4}, - {`"\00"`, "non-octal character in escape sequence: \"", 0, 4}, + {`"\x"`, "invalid character '\"' in hex escape", 0, 3}, + {`"\y"`, "unknown escape", 0, 2}, + {`"\x0"`, "invalid character '\"' in hex escape", 0, 4}, + {`"\00"`, "invalid character '\"' in octal escape", 0, 4}, {`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape - {`"\378"`, "non-octal character in escape sequence: 8", 0, 4}, - {`"\400"`, "octal escape value > 255: 256", 0, 5}, + {`"\378"`, "invalid character '8' in octal escape", 0, 4}, + {`"\400"`, "octal escape value 256 > 255", 0, 5}, - {`s := "foo\z"`, "unknown escape sequence", 0, 10}, - {`s := "foo\z00\nbar"`, "unknown escape sequence", 0, 10}, + {`s := "foo\z"`, "unknown escape", 0, 10}, + {`s := "foo\z00\nbar"`, "unknown escape", 0, 10}, {`"\x`, "string not terminated", 0, 0}, - {`"\x"`, "non-hex character in escape sequence: \"", 0, 3}, - {`var s string = "\x"`, "non-hex character in escape sequence: \"", 0, 18}, - {`return "\Uffffffff"`, "escape sequence is invalid Unicode code point U+FFFFFFFF", 0, 18}, + {`"\x"`, "invalid character '\"' in hex escape", 0, 3}, + {`var s string = "\x"`, "invalid character '\"' in hex escape", 0, 18}, + {`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 18}, + + {"0b.0", "invalid radix point in binary literal", 0, 2}, + {"0x.p0\n", "hexadecimal literal has no digits", 0, 3}, // former problem cases {"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0}, } { var s scanner - nerrors := 0 - s.init(strings.NewReader(test.src), func(line, col uint, msg string) { - nerrors++ - // only check the first error - if nerrors == 1 { - if msg != test.msg { - t.Errorf("%q: got msg = %q; want %q", test.src, msg, test.msg) - } - if 
line != test.line+linebase { - t.Errorf("%q: got line = %d; want %d", test.src, line, test.line+linebase) - } - if col != test.col+colbase { - t.Errorf("%q: got col = %d; want %d", test.src, col, test.col+colbase) - } - } else if nerrors > 1 { - // TODO(gri) make this use position info - t.Errorf("%q: got unexpected %q at line = %d", test.src, msg, line) + var line, col uint + var err string + s.init(strings.NewReader(test.src), func(l, c uint, msg string) { + if err == "" { + line, col = l-linebase, c-colbase + err = msg } }, 0) @@ -638,8 +671,18 @@ func TestScanErrors(t *testing.T) { } } - if nerrors == 0 { - t.Errorf("%q: got no error; want %q", test.src, test.msg) + if err != "" { + if err != test.err { + t.Errorf("%q: got err = %q; want %q", test.src, err, test.err) + } + if line != test.line { + t.Errorf("%q: got line = %d; want %d", test.src, line, test.line) + } + if col != test.col { + t.Errorf("%q: got col = %d; want %d", test.src, col, test.col) + } + } else { + t.Errorf("%q: got no error; want %q", test.src, test.err) } } } @@ -648,7 +691,7 @@ func TestIssue21938(t *testing.T) { s := "/*" + strings.Repeat(" ", 4089) + "*/ .5" var got scanner - got.init(strings.NewReader(s), nil, 0) + got.init(strings.NewReader(s), errh, 0) got.next() if got.tok != _Literal || got.lit != ".5" { diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go index c671e3c11e..01b592152b 100644 --- a/src/cmd/compile/internal/syntax/source.go +++ b/src/cmd/compile/internal/syntax/source.go @@ -3,11 +3,10 @@ // license that can be found in the LICENSE file. // This file implements source, a buffered rune reader -// which is specialized for the needs of the Go scanner: -// Contiguous sequences of runes (literals) are extracted -// directly as []byte without the need to re-encode the -// runes in UTF-8 (as would be necessary with bufio.Reader). 
-// +// specialized for scanning Go code: Reading +// ASCII characters, maintaining current (line, col) +// position information, and recording of the most +// recently read source segment are highly optimized. // This file is self-contained (go tool compile source.go // compiles) and thus could be made into its own package. @@ -18,202 +17,202 @@ import ( "unicode/utf8" ) +// The source buffer is accessed using three indices b (begin), +// r (read), and e (end): +// +// - If b >= 0, it points to the beginning of a segment of most +// recently read characters (typically a Go literal). +// +// - r points to the byte immediately following the most recently +// read character ch, which starts at r-chw. +// +// - e points to the byte immediately following the last byte that +// was read into the buffer. +// +// The buffer content is terminated at buf[e] with the sentinel +// character utf8.RuneSelf. This makes it possible to test for +// the common case of ASCII characters with a single 'if' (see +// nextch method). +// +// +------ content in use -------+ +// v v +// buf [...read...|...segment...|ch|...unread...|s|...free...] +// ^ ^ ^ ^ +// | | | | +// b r-chw r e +// +// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel + +type source struct { + in io.Reader + errh func(line, col uint, msg string) + + buf []byte // source buffer + ioerr error // pending I/O error, or nil + b, r, e int // buffer indices (see comment above) + line, col uint // source position of ch (0-based) + ch rune // most recently read character + chw int // width of ch +} + +const sentinel = utf8.RuneSelf + +func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) { + s.in = in + s.errh = errh + + if s.buf == nil { + s.buf = make([]byte, nextSize(0)) + } + s.buf[0] = sentinel + s.ioerr = nil + s.b, s.r, s.e = -1, 0, 0 + s.line, s.col = 0, 0 + s.ch = ' ' + s.chw = 0 +} + // starting points for line and column numbers const linebase = 1 const colbase = 1 -// max. 
number of bytes to unread -const maxunread = 10 - -// buf [...read...|...|...unread...|s|...free...] -// ^ ^ ^ ^ -// | | | | -// suf r0 r w - -type source struct { - src io.Reader - errh func(line, pos uint, msg string) - - // source buffer - buf [4 << 10]byte - r0, r, w int // previous/current read and write buf positions, excluding sentinel - line0, line uint // previous/current line - col0, col uint // previous/current column (byte offsets from line start) - ioerr error // pending io error - - // literal buffer - lit []byte // literal prefix - suf int // literal suffix; suf >= 0 means we are scanning a literal -} - -// init initializes source to read from src and to report errors via errh. -// errh must not be nil. -func (s *source) init(src io.Reader, errh func(line, pos uint, msg string)) { - s.src = src - s.errh = errh - - s.buf[0] = utf8.RuneSelf // terminate with sentinel - s.r0, s.r, s.w = 0, 0, 0 - s.line0, s.line = 0, linebase - s.col0, s.col = 0, colbase - s.ioerr = nil - - s.lit = s.lit[:0] - s.suf = -1 -} - -// ungetr sets the reading position to a previous reading -// position, usually the one of the most recently read -// rune, but possibly earlier (see unread below). -func (s *source) ungetr() { - s.r, s.line, s.col = s.r0, s.line0, s.col0 -} - -// unread moves the previous reading position to a position -// that is n bytes earlier in the source. The next ungetr -// call will set the reading position to that moved position. -// The "unread" runes must be single byte and not contain any -// newlines; and 0 <= n <= maxunread must hold. -func (s *source) unread(n int) { - s.r0 -= n - s.col0 -= uint(n) +// pos returns the (line, col) source position of s.ch. +func (s *source) pos() (line, col uint) { + return linebase + s.line, colbase + s.col } +// error reports the error msg at source position s.pos(). 
func (s *source) error(msg string) { - s.errh(s.line0, s.col0, msg) + line, col := s.pos() + s.errh(line, col, msg) } -// getr reads and returns the next rune. -// -// If a read or source encoding error occurs, getr -// calls the error handler installed with init. -// The handler must exist. -// -// The (line, col) position passed to the error handler -// is always at the current source reading position. -func (s *source) getr() rune { +// start starts a new active source segment (including s.ch). +// As long as stop has not been called, the active segment's +// bytes (excluding s.ch) may be retrieved by calling segment. +func (s *source) start() { s.b = s.r - s.chw } +func (s *source) stop() { s.b = -1 } +func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] } + +// rewind rewinds the scanner's read position and character s.ch +// to the start of the currently active segment, which must not +// contain any newlines (otherwise position information will be +// incorrect). Currently, rewind is only needed for handling the +// source sequence ".."; it must not be called outside an active +// segment. +func (s *source) rewind() { + // ok to verify precondition - rewind is rarely called + if s.b < 0 { + panic("no active segment") + } + s.col -= uint(s.r - s.b) + s.r = s.b + s.nextch() +} + +func (s *source) nextch() { redo: - s.r0, s.line0, s.col0 = s.r, s.line, s.col - - // We could avoid at least one test that is always taken in the - // for loop below by duplicating the common case code (ASCII) - // here since we always have at least the sentinel (utf8.RuneSelf) - // in the buffer. Measure and optimize if necessary. 
- - // make sure we have at least one rune in buffer, or we are at EOF - for s.r+utf8.UTFMax > s.w && !utf8.FullRune(s.buf[s.r:s.w]) && s.ioerr == nil && s.w-s.r < len(s.buf) { - s.fill() // s.w-s.r < len(s.buf) => buffer is not full + s.col += uint(s.chw) + if s.ch == '\n' { + s.line++ + s.col = 0 } - // common case: ASCII and enough bytes - // (invariant: s.buf[s.w] == utf8.RuneSelf) - if b := s.buf[s.r]; b < utf8.RuneSelf { + // fast common case: at least one ASCII character + if s.ch = rune(s.buf[s.r]); s.ch < sentinel { s.r++ - // TODO(gri) Optimization: Instead of adjusting s.col for each character, - // remember the line offset instead and then compute the offset as needed - // (which is less often). - s.col++ - if b == 0 { + s.chw = 1 + if s.ch == 0 { s.error("invalid NUL character") goto redo } - if b == '\n' { - s.line++ - s.col = colbase - } - return rune(b) + return + } + + // slower general case: add more bytes to buffer if we don't have a full rune + for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil { + s.fill() } // EOF - if s.r == s.w { + if s.r == s.e { if s.ioerr != io.EOF { // ensure we never start with a '/' (e.g., rooted path) in the error message s.error("I/O error: " + s.ioerr.Error()) + s.ioerr = nil } - return -1 + s.ch = -1 + s.chw = 0 + return } - // uncommon case: not ASCII - r, w := utf8.DecodeRune(s.buf[s.r:s.w]) - s.r += w - s.col += uint(w) + s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e]) + s.r += s.chw - if r == utf8.RuneError && w == 1 { + if s.ch == utf8.RuneError && s.chw == 1 { s.error("invalid UTF-8 encoding") goto redo } // BOM's are only allowed as the first character in a file const BOM = 0xfeff - if r == BOM { - if s.r0 > 0 { // s.r0 is always > 0 after 1st character (fill will set it to maxunread) + if s.ch == BOM { + if s.line > 0 || s.col > 0 { s.error("invalid BOM in the middle of the file") } goto redo } - - return r } +// fill reads more source bytes into s.buf. 
+// It returns with at least one more byte in the buffer, or with s.ioerr != nil. func (s *source) fill() { - // Slide unread bytes to beginning but preserve last read char - // (for one ungetr call) plus maxunread extra bytes (for one - // unread call). - if s.r0 > maxunread { - n := s.r0 - maxunread // number of bytes to slide down - // save literal prefix, if any - // (make sure we keep maxunread bytes and the last - // read char in the buffer) - if s.suf >= 0 { - // we have a literal - if s.suf < n { - // save literal prefix - s.lit = append(s.lit, s.buf[s.suf:n]...) - s.suf = 0 - } else { - s.suf -= n - } - } - copy(s.buf[:], s.buf[n:s.w]) - s.r0 = maxunread // eqv: s.r0 -= n - s.r -= n - s.w -= n + // determine content to preserve + b := s.r + if s.b >= 0 { + b = s.b + s.b = 0 // after buffer has grown or content has been moved down } + content := s.buf[b:s.e] + + // grow buffer or move content down + if len(content)*2 > len(s.buf) { + s.buf = make([]byte, nextSize(len(s.buf))) + copy(s.buf, content) + } else if b > 0 { + copy(s.buf, content) + } + s.r -= b + s.e -= b // read more data: try a limited number of times - for i := 100; i > 0; i-- { - n, err := s.src.Read(s.buf[s.w : len(s.buf)-1]) // -1 to leave space for sentinel + for i := 0; i < 10; i++ { + var n int + n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel if n < 0 { panic("negative read") // incorrect underlying io.Reader implementation } - s.w += n - if n > 0 || err != nil { - s.buf[s.w] = utf8.RuneSelf // sentinel - if err != nil { - s.ioerr = err - } + if n > 0 || s.ioerr != nil { + s.e += n + s.buf[s.e] = sentinel return } + // n == 0 } - s.buf[s.w] = utf8.RuneSelf // sentinel + s.buf[s.e] = sentinel s.ioerr = io.ErrNoProgress } -func (s *source) startLit() { - s.suf = s.r0 - s.lit = s.lit[:0] // reuse lit -} - -func (s *source) stopLit() []byte { - lit := s.buf[s.suf:s.r] - if len(s.lit) > 0 { - lit = append(s.lit, lit...) 
+// nextSize returns the next bigger size for a buffer of a given size. +func nextSize(size int) int { + const min = 4 << 10 // 4K: minimum buffer size + const max = 1 << 20 // 1M: maximum buffer size which is still doubled + if size < min { + return min } - s.killLit() - return lit -} - -func (s *source) killLit() { - s.suf = -1 // no pending literal + if size <= max { + return size << 1 + } + return size + max } diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go index 18838fb4ca..796aa82f19 100644 --- a/src/cmd/compile/internal/x86/387.go +++ b/src/cmd/compile/internal/x86/387.go @@ -326,7 +326,7 @@ func push(s *gc.SSAGenState, v *ssa.Value) { } // popAndSave pops a value off of the floating-point stack and stores -// it in the reigster assigned to v. +// it in the register assigned to v. func popAndSave(s *gc.SSAGenState, v *ssa.Value) { r := v.Reg() if _, ok := s.SSEto387[r]; ok { diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index ca617e917e..48c36a63fc 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -941,6 +941,8 @@ func (t *tester) internalLinkPIE() bool { case "linux-amd64", "linux-arm64", "android-arm64": return true + case "windows-amd64", "windows-386", "windows-arm": + return true } return false } @@ -997,6 +999,8 @@ func (t *tester) supportedBuildmode(mode string) bool { return true case "darwin-amd64": return true + case "windows-amd64", "windows-386", "windows-arm": + return true } return false diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go index c0959acca1..fd2ae30827 100644 --- a/src/cmd/doc/doc_test.go +++ b/src/cmd/doc/doc_test.go @@ -724,6 +724,40 @@ var tests = []test{ }, }, + // Merging comments with -src. 
+ { + "merge comments with -src A", + []string{"-src", p + "/merge", `A`}, + []string{ + `A doc`, + `func A`, + `A comment`, + }, + []string{ + `Package A doc`, + `Package B doc`, + `B doc`, + `B comment`, + `B doc`, + }, + }, + { + "merge comments with -src B", + []string{"-src", p + "/merge", `B`}, + []string{ + `B doc`, + `func B`, + `B comment`, + }, + []string{ + `Package A doc`, + `Package B doc`, + `A doc`, + `A comment`, + `A doc`, + }, + }, + // No dups with -u. Issue 21797. { "case matching on, no dups", diff --git a/src/cmd/doc/testdata/merge/aa.go b/src/cmd/doc/testdata/merge/aa.go new file mode 100644 index 0000000000..f8ab92dfd0 --- /dev/null +++ b/src/cmd/doc/testdata/merge/aa.go @@ -0,0 +1,7 @@ +// Package comment A. +package merge + +// A doc. +func A() { + // A comment. +} diff --git a/src/cmd/doc/testdata/merge/bb.go b/src/cmd/doc/testdata/merge/bb.go new file mode 100644 index 0000000000..fd8cf3c446 --- /dev/null +++ b/src/cmd/doc/testdata/merge/bb.go @@ -0,0 +1,7 @@ +// Package comment B. +package merge + +// B doc. +func B() { + // B comment. 
+} diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 4d5136deea..a5b0f0898b 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -9,6 +9,7 @@ import ( "context" "debug/elf" "debug/macho" + "debug/pe" "flag" "fmt" "go/format" @@ -2146,19 +2147,37 @@ func TestBuildmodePIE(t *testing.T) { switch platform { case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", "android/amd64", "android/arm", "android/arm64", "android/386", - "freebsd/amd64": + "freebsd/amd64", + "windows/386", "windows/amd64", "windows/arm": case "darwin/amd64": default: t.Skipf("skipping test because buildmode=pie is not supported on %s", platform) } + t.Run("non-cgo", func(t *testing.T) { + testBuildmodePIE(t, false) + }) + if canCgo { + switch runtime.GOOS { + case "darwin", "freebsd", "linux", "windows": + t.Run("cgo", func(t *testing.T) { + testBuildmodePIE(t, true) + }) + } + } +} +func testBuildmodePIE(t *testing.T, useCgo bool) { tg := testgo(t) defer tg.cleanup() tg.parallel() - tg.tempFile("main.go", `package main; func main() { print("hello") }`) + var s string + if useCgo { + s = `import "C";` + } + tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s)) src := tg.path("main.go") - obj := tg.path("main") + obj := tg.path("main.exe") tg.run("build", "-buildmode=pie", "-o", obj, src) switch runtime.GOOS { @@ -2183,6 +2202,38 @@ func TestBuildmodePIE(t *testing.T) { if f.Flags&macho.FlagPIE == 0 { t.Error("PIE must have PIE flag, but not") } + case "windows": + f, err := pe.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + const ( + IMAGE_FILE_RELOCS_STRIPPED = 0x0001 + IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020 + IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 + ) + if f.Section(".reloc") == nil { + t.Error(".reloc section is not present") + } + if (f.FileHeader.Characteristics & IMAGE_FILE_RELOCS_STRIPPED) != 0 { + t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set") + } + 
var dc uint16 + switch oh := f.OptionalHeader.(type) { + case *pe.OptionalHeader32: + dc = oh.DllCharacteristics + case *pe.OptionalHeader64: + dc = oh.DllCharacteristics + if (dc & IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set") + } + default: + t.Fatalf("unexpected optional header type of %T", f.OptionalHeader) + } + if (dc & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") + } default: panic("unreachable") } @@ -2380,30 +2431,6 @@ func TestTestCache(t *testing.T) { tg.setenv("GOPATH", tg.tempdir) tg.setenv("GOCACHE", tg.path("cache")) - if runtime.Compiler != "gccgo" { - // timeout here should not affect result being cached - // or being retrieved later. - tg.run("test", "-x", "-timeout=10s", "errors") - tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler") - tg.grepStderr(`[\\/]link|gccgo`, "did not run linker") - tg.grepStderr(`errors\.test`, "did not run test") - - tg.run("test", "-x", "errors") - tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result") - tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler") - tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker") - tg.grepStderrNot(`errors\.test`, "incorrectly ran test") - tg.grepStderrNot("DO NOT USE", "poisoned action status leaked") - - // Even very low timeouts do not disqualify cached entries. - tg.run("test", "-timeout=1ns", "-x", "errors") - tg.grepStderrNot(`errors\.test`, "incorrectly ran test") - - tg.run("clean", "-testcache") - tg.run("test", "-x", "errors") - tg.grepStderr(`errors\.test`, "did not run test") - } - // The -p=1 in the commands below just makes the -x output easier to read. 
t.Log("\n\nINITIAL\n\n") diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index b048eafa74..f7b2fa96e8 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -286,11 +286,12 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) if wildcardOkay && strings.Contains(arg, "...") { match := search.NewMatch(arg) if match.IsLocal() { - match.MatchPackagesInFS() + match.MatchDirs() + args = match.Dirs } else { match.MatchPackages() + args = match.Pkgs } - args = match.Pkgs for _, err := range match.Errs { base.Errorf("%s", err) } diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 32841d96cb..6ea7d8c69b 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -65,24 +65,13 @@ func ImportPaths(patterns []string) []*search.Match { // packages. The build tags should typically be imports.Tags() or // imports.AnyTags(); a nil map has no special meaning. func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match { - var fsDirs [][]string updateMatches := func(matches []*search.Match, iterating bool) { - for i, m := range matches { + for _, m := range matches { switch { case m.IsLocal(): // Evaluate list of file system directories on first iteration. - if fsDirs == nil { - fsDirs = make([][]string, len(matches)) - } - if fsDirs[i] == nil { - if m.IsLiteral() { - fsDirs[i] = []string{m.Pattern()} - } else { - m.MatchPackagesInFS() - // Pull out the matching directories: we are going to resolve them - // to package paths below. - fsDirs[i], m.Pkgs = m.Pkgs, nil - } + if m.Dirs == nil { + matchLocalDirs(m) } // Make a copy of the directory list and translate to import paths. 
@@ -91,10 +80,9 @@ func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match { // from not being in the build list to being in it and back as // the exact version of a particular module increases during // the loader iterations. - pkgs := str.StringList(fsDirs[i]) - m.Pkgs = pkgs[:0] - for _, pkg := range pkgs { - pkg, err := resolveLocalPackage(pkg) + m.Pkgs = m.Pkgs[:0] + for _, dir := range m.Dirs { + pkg, err := resolveLocalPackage(dir) if err != nil { if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { continue // Don't include "builtin" or GOROOT/src in wildcard patterns. @@ -131,7 +119,7 @@ func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match { } case m.Pattern() == "std" || m.Pattern() == "cmd": - if len(m.Pkgs) == 0 { + if m.Pkgs == nil { m.MatchPackages() // Locate the packages within GOROOT/src. } @@ -186,6 +174,34 @@ func checkMultiplePaths() { base.ExitIfErrors() } +// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories +// outside of the standard library and active modules. +func matchLocalDirs(m *search.Match) { + if !m.IsLocal() { + panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) + } + + if i := strings.Index(m.Pattern(), "..."); i >= 0 { + // The pattern is local, but it is a wildcard. Its packages will + // only resolve to paths if they are inside of the standard + // library, the main module, or some dependency of the main + // module. Verify that before we walk the filesystem: a filesystem + // walk in a directory like /var or /etc can be very expensive! 
+ dir := filepath.Dir(filepath.Clean(m.Pattern()[:i+3])) + absDir := dir + if !filepath.IsAbs(dir) { + absDir = filepath.Join(base.Cwd, dir) + } + if search.InDir(absDir, cfg.GOROOTsrc) == "" && search.InDir(absDir, ModRoot()) == "" && pathInModuleCache(absDir) == "" { + m.Dirs = []string{} + m.AddError(fmt.Errorf("directory prefix %s outside available modules", base.ShortPath(absDir))) + return + } + } + + m.MatchDirs() +} + // resolveLocalPackage resolves a filesystem path to a package path. func resolveLocalPackage(dir string) (string, error) { var absDir string @@ -269,7 +285,11 @@ func resolveLocalPackage(dir string) (string, error) { } if sub := search.InDir(absDir, cfg.GOROOTsrc); sub != "" && sub != "." && !strings.Contains(sub, "@") { - return filepath.ToSlash(sub), nil + pkg := filepath.ToSlash(sub) + if pkg == "builtin" { + return "", errPkgIsBuiltin + } + return pkg, nil } pkg := pathInModuleCache(absDir) diff --git a/src/cmd/go/internal/renameio/renameio_test.go b/src/cmd/go/internal/renameio/renameio_test.go index ee2f3ba1bb..df8ddabdb8 100644 --- a/src/cmd/go/internal/renameio/renameio_test.go +++ b/src/cmd/go/internal/renameio/renameio_test.go @@ -9,11 +9,13 @@ package renameio import ( "encoding/binary" "errors" + "internal/testenv" "io/ioutil" "math/rand" "os" "path/filepath" "runtime" + "strings" "sync" "sync/atomic" "syscall" @@ -24,6 +26,10 @@ import ( ) func TestConcurrentReadsAndWrites(t *testing.T) { + if runtime.GOOS == "darwin" && strings.HasSuffix(testenv.Builder(), "-10_14") { + testenv.SkipFlaky(t, 33041) + } + dir, err := ioutil.TempDir("", "renameio") if err != nil { t.Fatal(err) diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go index 69d0e2d16f..b588c3e467 100644 --- a/src/cmd/go/internal/search/search.go +++ b/src/cmd/go/internal/search/search.go @@ -19,7 +19,8 @@ import ( // A Match represents the result of matching a single package pattern. 
type Match struct { pattern string // the pattern itself - Pkgs []string // matching packages (dirs or import paths) + Dirs []string // if the pattern is local, directories that potentially contain matching packages + Pkgs []string // matching packages (import paths) Errs []error // errors matching the patterns to packages, NOT errors loading those packages // Errs may be non-empty even if len(Pkgs) > 0, indicating that some matching @@ -84,20 +85,25 @@ func (e *MatchError) Unwrap() error { return e.Err } -// MatchPackages sets m.Pkgs to contain all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". +// MatchPackages sets m.Pkgs to a non-nil slice containing all the packages that +// can be found under the $GOPATH directories and $GOROOT that match the +// pattern. The pattern must be either "all" (all packages), "std" (standard +// packages), "cmd" (standard commands), or a path including "...". // -// MatchPackages sets m.Errs to contain any errors encountered while processing -// the match. +// If any errors may have caused the set of packages to be incomplete, +// MatchPackages appends those errors to m.Errs. func (m *Match) MatchPackages() { - m.Pkgs, m.Errs = nil, nil + m.Pkgs = []string{} if m.IsLocal() { m.AddError(fmt.Errorf("internal error: MatchPackages: %s is not a valid package pattern", m.pattern)) return } + if m.IsLiteral() { + m.Pkgs = []string{m.pattern} + return + } + match := func(string) bool { return true } treeCanMatch := func(string) bool { return true } if !m.IsMeta() { @@ -197,16 +203,22 @@ func SetModRoot(dir string) { modRoot = dir } -// MatchPackagesInFS is like MatchPackages but is passed a pattern that -// begins with an absolute path or "./" or "../". On Windows, the pattern may -// use slash or backslash separators or a mix of both. 
+// MatchDirs sets m.Dirs to a non-nil slice containing all directories that +// potentially match a local pattern. The pattern must begin with an absolute +// path, or "./", or "../". On Windows, the pattern may use slash or backslash +// separators or a mix of both. // -// MatchPackagesInFS scans the tree rooted at the directory that contains the -// first "..." wildcard. -func (m *Match) MatchPackagesInFS() { - m.Pkgs, m.Errs = nil, nil +// If any errors may have caused the set of directories to be incomplete, +// MatchDirs appends those errors to m.Errs. +func (m *Match) MatchDirs() { + m.Dirs = []string{} if !m.IsLocal() { - m.AddError(fmt.Errorf("internal error: MatchPackagesInFS: %s is not a valid filesystem pattern", m.pattern)) + m.AddError(fmt.Errorf("internal error: MatchDirs: %s is not a valid filesystem pattern", m.pattern)) + return + } + + if m.IsLiteral() { + m.Dirs = []string{m.pattern} return } @@ -301,7 +313,7 @@ func (m *Match) MatchPackagesInFS() { // which is all that Match promises to do. // Ignore the import error. } - m.Pkgs = append(m.Pkgs, name) + m.Dirs = append(m.Dirs, name) return nil }) if err != nil { @@ -416,25 +428,23 @@ func ImportPathsQuiet(patterns []string) []*Match { for _, a := range CleanPatterns(patterns) { m := NewMatch(a) if m.IsLocal() { - if m.IsLiteral() { - m.Pkgs = []string{a} - } else { - m.MatchPackagesInFS() - } + m.MatchDirs() // Change the file import path to a regular import path if the package // is in GOPATH or GOROOT. We don't report errors here; LoadImport // (or something similar) will report them later. - for i, dir := range m.Pkgs { + m.Pkgs = make([]string, len(m.Dirs)) + for i, dir := range m.Dirs { + absDir := dir if !filepath.IsAbs(dir) { - dir = filepath.Join(base.Cwd, dir) + absDir = filepath.Join(base.Cwd, dir) } - if bp, _ := cfg.BuildContext.ImportDir(dir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." 
{ + if bp, _ := cfg.BuildContext.ImportDir(absDir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." { m.Pkgs[i] = bp.ImportPath + } else { + m.Pkgs[i] = dir } } - } else if m.IsLiteral() { - m.Pkgs = []string{a} } else { m.MatchPackages() } diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 600f76df4c..dbb899219d 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -1239,6 +1239,14 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error { if len(out) == 0 { fmt.Fprintf(cmd.Stdout, "%s\n", err) } + // NOTE(golang.org/issue/37555): test2json reports that a test passes + // unless "FAIL" is printed at the beginning of a line. The test may not + // actually print that if it panics, exits, or terminates abnormally, + // so we print it here. We can't always check whether it was printed + // because some tests need stdout to be a terminal (golang.org/issue/34791), + // not a pipe. + // TODO(golang.org/issue/29062): tests that exit with status 0 without + // printing a final result should fail. fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t) } @@ -1291,16 +1299,13 @@ func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bo "-test.parallel", "-test.run", "-test.short", + "-test.timeout", "-test.v": // These are cacheable. // Note that this list is documented above, // so if you add to this list, update the docs too. cacheArgs = append(cacheArgs, arg) - case "-test.timeout": - // Special case: this is cacheable but ignored during the hash. - // Do not add to cacheArgs. 
- default: // nothing else is cacheable if cache.DebugTest { diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index 9091f98636..e970272954 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -161,8 +161,12 @@ func buildModeInit() { } if gccgo { codegenArg = "-fPIE" - } else if cfg.Goos != "aix" { - codegenArg = "-shared" + } else { + switch cfg.Goos { + case "aix", "windows": + default: + codegenArg = "-shared" + } } ldBuildmode = "pie" case "shared": diff --git a/src/cmd/go/testdata/script/mod_list_std.txt b/src/cmd/go/testdata/script/mod_list_std.txt index 8552aebf42..76a3b00d1c 100644 --- a/src/cmd/go/testdata/script/mod_list_std.txt +++ b/src/cmd/go/testdata/script/mod_list_std.txt @@ -14,6 +14,16 @@ go list cmd/... stdout ^cmd/compile ! stdout ^cmd/vendor/golang\.org/x/arch/x86/x86asm +# GOROOT/src/... should list the packages in std as if it were a module +# dependency: omitting vendored dependencies and stopping at the 'cmd' module +# boundary. + +go list $GOROOT/src/... +stdout ^bytes$ +! stdout ^builtin$ +! stdout ^cmd/ +! stdout ^vendor/ + # Within the std module, listing ./... should omit the 'std' prefix: # the package paths should be the same via ./... or the 'std' meta-pattern. diff --git a/src/cmd/go/testdata/script/test_cache_inputs.txt b/src/cmd/go/testdata/script/test_cache_inputs.txt index 46faca0f42..57602e91dc 100644 --- a/src/cmd/go/testdata/script/test_cache_inputs.txt +++ b/src/cmd/go/testdata/script/test_cache_inputs.txt @@ -29,6 +29,23 @@ go test testcache -run=TestLookupEnv go test testcache -run=TestLookupEnv stdout '\(cached\)' +# Changes in arguments forwarded to the test should invalidate cached test +# results. +go test testcache -run=TestOSArgs -v hello +! stdout '\(cached\)' +stdout 'hello' +go test testcache -run=TestOSArgs -v goodbye +! stdout '\(cached\)' +stdout 'goodbye' + +# golang.org/issue/36134: that includes the `-timeout` argument. 
+go test testcache -run=TestOSArgs -timeout=20m -v +! stdout '\(cached\)' +stdout '-test\.timeout[= ]20m' +go test testcache -run=TestOSArgs -timeout=5s -v +! stdout '\(cached\)' +stdout '-test\.timeout[= ]5s' + # If the test stats a file, changes to the file should invalidate the cache. go test testcache -run=FileSize go test testcache -run=FileSize @@ -207,6 +224,10 @@ func TestExternalFile(t *testing.T) { t.Fatal(err) } } + +func TestOSArgs(t *testing.T) { + t.Log(os.Args) +} -- mkold.go -- package main diff --git a/src/cmd/go/testdata/script/test_json_panic_exit.txt b/src/cmd/go/testdata/script/test_json_panic_exit.txt new file mode 100644 index 0000000000..d0a7991fe5 --- /dev/null +++ b/src/cmd/go/testdata/script/test_json_panic_exit.txt @@ -0,0 +1,69 @@ +# Verifies golang.org/issue/37555. + +[short] skip + +# 'go test -json' should say a test passes if it says it passes. +go test -json ./pass +stdout '"Action":"pass".*\n\z' +! stdout '"Test":.*\n\z' + +# 'go test -json' should say a test passes if it exits 0 and prints nothing. +# TODO(golang.org/issue/29062): this should fail in the future. +go test -json ./exit0main +stdout '"Action":"pass".*\n\z' +! stdout '"Test":.*\n\z' + +# 'go test -json' should say a test fails if it exits 1 and prints nothing. +! go test -json ./exit1main +stdout '"Action":"fail".*\n\z' +! stdout '"Test":.*\n\z' + +# 'go test -json' should say a test fails if it panics. +! go test -json ./panic +stdout '"Action":"fail".*\n\z' +! 
stdout '"Test":.*\n\z' + +-- go.mod -- +module example.com/test + +go 1.14 + +-- pass/pass_test.go -- +package pass_test + +import "testing" + +func TestPass(t *testing.T) {} + +-- exit0main/exit0main_test.go -- +package exit0_test + +import ( + "os" + "testing" +) + +func TestMain(m *testing.M) { + os.Exit(0) +} + +-- exit1main/exit1main_test.go -- +package exit1_test + +import ( + "os" + "testing" +) + +func TestMain(m *testing.M) { + os.Exit(1) +} + +-- panic/panic_test.go -- +package panic_test + +import "testing" + +func TestPanic(t *testing.T) { + panic("oh no") +} diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt index 0ed1194840..0123ac6d53 100644 --- a/src/cmd/go/testdata/script/version.txt +++ b/src/cmd/go/testdata/script/version.txt @@ -22,8 +22,6 @@ stdout '^\tpath\trsc.io/fortune' stdout '^\tmod\trsc.io/fortune\tv1.0.0' # Repeat the test with -buildmode=pie. -# TODO(golang.org/issue/27144): don't skip after -buildmode=pie is implemented -# on Windows. [!buildmode:pie] stop go build -buildmode=pie -o external.exe rsc.io/fortune go version external.exe @@ -33,8 +31,8 @@ stdout '^\tpath\trsc.io/fortune' stdout '^\tmod\trsc.io/fortune\tv1.0.0' # Also test PIE with internal linking. -# currently only supported on linux/amd64 and linux/arm64. -[!linux] stop +# currently only supported on linux/amd64, linux/arm64 and windows/amd64. 
+[!linux] [!windows] stop [!amd64] [!arm64] stop go build -buildmode=pie -ldflags=-linkmode=internal -o internal.exe rsc.io/fortune go version internal.exe diff --git a/src/cmd/internal/obj/mips/a.out.go b/src/cmd/internal/obj/mips/a.out.go index b0205ec11a..ddd048a17f 100644 --- a/src/cmd/internal/obj/mips/a.out.go +++ b/src/cmd/internal/obj/mips/a.out.go @@ -43,6 +43,7 @@ const ( NSYM = 50 NREG = 32 /* number of general registers */ NFREG = 32 /* number of floating point registers */ + NWREG = 32 /* number of MSA registers */ ) const ( @@ -180,6 +181,41 @@ const ( REG_FCR30 REG_FCR31 + // MSA registers + // The lower bits of W registers are alias to F registers + REG_W0 // must be a multiple of 32 + REG_W1 + REG_W2 + REG_W3 + REG_W4 + REG_W5 + REG_W6 + REG_W7 + REG_W8 + REG_W9 + REG_W10 + REG_W11 + REG_W12 + REG_W13 + REG_W14 + REG_W15 + REG_W16 + REG_W17 + REG_W18 + REG_W19 + REG_W20 + REG_W21 + REG_W22 + REG_W23 + REG_W24 + REG_W25 + REG_W26 + REG_W27 + REG_W28 + REG_W29 + REG_W30 + REG_W31 + REG_HI REG_LO @@ -217,6 +253,8 @@ func init() { f(REG_F0, REG_F31, 32) // For 32-bit MIPS, compiler only uses even numbered registers -- see cmd/compile/internal/ssa/gen/MIPSOps.go MIPSDWARFRegisters[REG_HI] = 64 MIPSDWARFRegisters[REG_LO] = 65 + // The lower bits of W registers are alias to F registers + f(REG_W0, REG_W31, 32) } const ( @@ -243,6 +281,7 @@ const ( C_FREG C_FCREG C_MREG /* special processor register */ + C_WREG /* MSA registers */ C_HI C_LO C_ZCON @@ -405,6 +444,12 @@ const ( AMOVVF AMOVVD + /* MSA */ + AVMOVB + AVMOVH + AVMOVW + AVMOVD + ALAST // aliases @@ -430,4 +475,7 @@ func init() { if REG_FCR0%32 != 0 { panic("REG_FCR0 is not a multiple of 32") } + if REG_W0%32 != 0 { + panic("REG_W0 is not a multiple of 32") + } } diff --git a/src/cmd/internal/obj/mips/anames.go b/src/cmd/internal/obj/mips/anames.go index 9a2e4f5703..2a44e4ca70 100644 --- a/src/cmd/internal/obj/mips/anames.go +++ b/src/cmd/internal/obj/mips/anames.go @@ -127,5 +127,9 @@ var Anames 
= []string{ "MOVDV", "MOVVF", "MOVVD", + "VMOVB", + "VMOVH", + "VMOVW", + "VMOVD", "LAST", } diff --git a/src/cmd/internal/obj/mips/anames0.go b/src/cmd/internal/obj/mips/anames0.go index c56d34eaf5..c300696730 100644 --- a/src/cmd/internal/obj/mips/anames0.go +++ b/src/cmd/internal/obj/mips/anames0.go @@ -10,6 +10,7 @@ var cnames0 = []string{ "FREG", "FCREG", "MREG", + "WREG", "HI", "LO", "ZCON", diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 934f88a0b1..c19541522f 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -377,6 +377,11 @@ var optab = []Optab{ {ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0, 0}, {ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0}, + {AVMOVB, C_SCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0}, + {AVMOVB, C_ADDCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0}, + {AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0}, + {AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0}, + {ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */ {ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, @@ -556,6 +561,9 @@ func (c *ctxt0) aclass(a *obj.Addr) int { if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 { return C_FCREG } + if REG_W0 <= a.Reg && a.Reg <= REG_W31 { + return C_WREG + } if a.Reg == REG_LO { return C_LO } @@ -1029,6 +1037,11 @@ func buildop(ctxt *obj.Link) { case AMOVVL: opset(AMOVVR, r0) + case AVMOVB: + opset(AVMOVH, r0) + opset(AVMOVW, r0) + opset(AVMOVD, r0) + case AMOVW, AMOVD, AMOVF, @@ -1121,6 +1134,14 @@ func OP_JMP(op uint32, i uint32) uint32 { return op | i&0x3FFFFFF } +func OP_VI10(op uint32, df uint32, s10 int32, wd uint32, minor uint32) uint32 { + return 0x1e<<26 | (op&7)<<23 | (df&3)<<21 | uint32(s10&0x3FF)<<11 | (wd&31)<<6 | minor&0x3F +} + +func OP_VMI10(s10 int32, rs uint32, wd uint32, minor uint32, df uint32) uint32 { + return 0x1e<<26 | 
uint32(s10&0x3FF)<<16 | (rs&31)<<11 | (wd&31)<<6 | (minor&15)<<2 | df&3 +} + func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 := uint32(0) o2 := uint32(0) @@ -1629,6 +1650,19 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = objabi.R_ADDRMIPSTLS + + case 56: /* vmov{b,h,w,d} $scon, wr */ + + v := c.regoff(&p.From) + o1 = OP_VI10(110, c.twobitdf(p.As), v, uint32(p.To.Reg), 7) + + case 57: /* vld $soreg, wr */ + v := c.lsoffset(p.As, c.regoff(&p.From)) + o1 = OP_VMI10(v, uint32(p.From.Reg), uint32(p.To.Reg), 8, c.twobitdf(p.As)) + + case 58: /* vst wr, $soreg */ + v := c.lsoffset(p.As, c.regoff(&p.To)) + o1 = OP_VMI10(v, uint32(p.To.Reg), uint32(p.From.Reg), 9, c.twobitdf(p.As)) } out[0] = o1 @@ -2009,3 +2043,43 @@ func vshift(a obj.As) bool { } return false } + +// MSA Two-bit Data Format Field Encoding +func (c *ctxt0) twobitdf(a obj.As) uint32 { + switch a { + case AVMOVB: + return 0 + case AVMOVH: + return 1 + case AVMOVW: + return 2 + case AVMOVD: + return 3 + default: + c.ctxt.Diag("unsupported data format %v", a) + } + return 0 +} + +// MSA Load/Store offset have to be multiple of size of data format +func (c *ctxt0) lsoffset(a obj.As, o int32) int32 { + var mod int32 + switch a { + case AVMOVB: + mod = 1 + case AVMOVH: + mod = 2 + case AVMOVW: + mod = 4 + case AVMOVD: + mod = 8 + default: + c.ctxt.Diag("unsupported instruction:%v", a) + } + + if o%mod != 0 { + c.ctxt.Diag("invalid offset for %v: %d is not a multiple of %d", a, o, mod) + } + + return o / mod +} diff --git a/src/cmd/internal/obj/mips/list0.go b/src/cmd/internal/obj/mips/list0.go index addf9f70d8..f734e21ede 100644 --- a/src/cmd/internal/obj/mips/list0.go +++ b/src/cmd/internal/obj/mips/list0.go @@ -59,6 +59,9 @@ func rconv(r int) string { if REG_FCR0 <= r && r <= REG_FCR31 { return fmt.Sprintf("FCR%d", r-REG_FCR0) } + if REG_W0 <= r && r <= REG_W31 { + return fmt.Sprintf("W%d", r-REG_W0) + } if r == REG_HI { 
return "HI" } diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index c8ab2181b5..639827be86 100644 --- a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -87,7 +87,8 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { "android/amd64", "android/arm", "android/arm64", "android/386", "freebsd/amd64", "darwin/amd64", - "aix/ppc64": + "aix/ppc64", + "windows/386", "windows/amd64", "windows/arm": return true } return false diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go index aa63c8b9a6..098128ef3a 100644 --- a/src/cmd/internal/test2json/test2json.go +++ b/src/cmd/internal/test2json/test2json.go @@ -128,9 +128,16 @@ func (c *converter) Write(b []byte) (int, error) { } var ( + // printed by test on successful run. bigPass = []byte("PASS\n") + + // printed by test after a normal test failure. bigFail = []byte("FAIL\n") + // printed by 'go test' along with an error if the test binary terminates + // with an error. + bigFailErrorPrefix = []byte("FAIL\t") + updates = [][]byte{ []byte("=== RUN "), []byte("=== PAUSE "), @@ -155,7 +162,7 @@ var ( // before or after emitting other events. func (c *converter) handleInputLine(line []byte) { // Final PASS or FAIL. 
- if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) { + if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) || bytes.HasPrefix(line, bigFailErrorPrefix) { c.flushReport(0) c.output.write(line) if bytes.Equal(line, bigPass) { diff --git a/src/cmd/internal/test2json/testdata/panic.json b/src/cmd/internal/test2json/testdata/panic.json index f99679c2e2..f7738142e6 100644 --- a/src/cmd/internal/test2json/testdata/panic.json +++ b/src/cmd/internal/test2json/testdata/panic.json @@ -13,7 +13,7 @@ {"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:909 +0xc9\n"} {"Action":"output","Test":"TestPanic","Output":"created by testing.(*T).Run\n"} {"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:960 +0x350\n"} -{"Action":"output","Test":"TestPanic","Output":"FAIL\tcommand-line-arguments\t0.042s\n"} {"Action":"fail","Test":"TestPanic"} +{"Action":"output","Output":"FAIL\tcommand-line-arguments\t0.042s\n"} {"Action":"output","Output":"FAIL\n"} {"Action":"fail"} diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go index 0eba4dc162..2373b500e3 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -38,7 +38,7 @@ func (mode *BuildMode) Set(s string) error { *mode = BuildModeExe case "pie": switch objabi.GOOS { - case "aix", "android", "linux": + case "aix", "android", "linux", "windows": case "darwin", "freebsd": switch objabi.GOARCH { case "amd64": @@ -209,6 +209,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { case BuildModePIE: switch objabi.GOOS + "/" + objabi.GOARCH { case "linux/amd64", "linux/arm64", "android/arm64": + case "windows/386", "windows/amd64", "windows/arm": default: // Internal linking does not support TLS_IE. 
return true, "buildmode=pie" diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 7eea8a7dad..a133ee20fc 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1287,8 +1287,20 @@ func (ctxt *Link) hostlink() { } } case BuildModePIE: - // ELF. - if ctxt.HeadType != objabi.Hdarwin && ctxt.HeadType != objabi.Haix { + switch ctxt.HeadType { + case objabi.Hdarwin, objabi.Haix: + case objabi.Hwindows: + // Enable ASLR. + argv = append(argv, "-Wl,--dynamicbase") + // enable high-entropy ASLR on 64-bit. + if ctxt.Arch.PtrSize >= 8 { + argv = append(argv, "-Wl,--high-entropy-va") + } + // Work around binutils limitation that strips relocation table for dynamicbase. + // See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 + argv = append(argv, "-Wl,--export-all-symbols") + default: + // ELF. if ctxt.UseRelro() { argv = append(argv, "-Wl,-z,relro") } diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 6f6d5b2e5a..ad2f6e0fbb 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -95,6 +95,7 @@ const ( IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14 IMAGE_SUBSYSTEM_WINDOWS_GUI = 2 IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 + IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020 IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000 @@ -127,6 +128,7 @@ const ( IMAGE_REL_ARM_SECREL = 0x000F IMAGE_REL_BASED_HIGHLOW = 3 + IMAGE_REL_BASED_DIR64 = 10 ) const ( @@ -753,12 +755,12 @@ func (f *peFile) writeSymbolTableAndStringTable(ctxt *Link) { } // writeFileHeader writes COFF file header for peFile f. 
-func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) { +func (f *peFile) writeFileHeader(ctxt *Link) { var fh pe.FileHeader - switch arch.Family { + switch ctxt.Arch.Family { default: - Exitf("unknown PE architecture: %v", arch.Family) + Exitf("unknown PE architecture: %v", ctxt.Arch.Family) case sys.AMD64: fh.Machine = IMAGE_FILE_MACHINE_AMD64 case sys.I386: @@ -773,16 +775,15 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) // much more beneficial than having build timestamp in the header. fh.TimeDateStamp = 0 - if linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED } else { - switch arch.Family { - default: - Exitf("write COFF(ext): unknown PE architecture: %v", arch.Family) + fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + switch ctxt.Arch.Family { case sys.AMD64, sys.I386: - fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED - case sys.ARM: - fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + if ctxt.BuildMode != BuildModePIE { + fh.Characteristics |= IMAGE_FILE_RELOCS_STRIPPED + } } } if pe64 != 0 { @@ -798,7 +799,7 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) fh.PointerToSymbolTable = uint32(f.symtabOffset) fh.NumberOfSymbols = uint32(f.symbolCount) - binary.Write(out, binary.LittleEndian, &fh) + binary.Write(ctxt.Out, binary.LittleEndian, &fh) } // writeOptionalHeader writes COFF optional header for peFile f. 
@@ -860,12 +861,6 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) { oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI } - switch ctxt.Arch.Family { - case sys.ARM: - oh64.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE - oh.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE - } - // Mark as having awareness of terminal services, to avoid ancient compatibility hacks. oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE @@ -874,6 +869,23 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) { oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT + // The DLL can be relocated at load time. + switch ctxt.Arch.Family { + case sys.AMD64, sys.I386: + if ctxt.BuildMode == BuildModePIE { + oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + } + case sys.ARM: + oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + } + + // Image can handle a high entropy 64-bit virtual address space. 
+ if ctxt.BuildMode == BuildModePIE { + oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA + } + // Disable stack growth as we don't want Windows to // fiddle with the thread stack limits, which we set // ourselves to circumvent the stack checks in the @@ -1005,7 +1017,7 @@ func pewrite(ctxt *Link) { ctxt.Out.WriteStringN("PE", 4) } - pefile.writeFileHeader(ctxt.Arch, ctxt.Out, ctxt.LinkMode) + pefile.writeFileHeader(ctxt) pefile.writeOptionalHeader(ctxt) @@ -1384,6 +1396,8 @@ func (rt *peBaseRelocTable) addentry(ctxt *Link, s *sym.Symbol, r *sym.Reloc) { Exitf("unsupported relocation size %d\n", r.Siz) case 4: e.typeOff |= uint16(IMAGE_REL_BASED_HIGHLOW << 12) + case 8: + e.typeOff |= uint16(IMAGE_REL_BASED_DIR64 << 12) } b.entries = append(b.entries, e) @@ -1438,11 +1452,15 @@ func addPEBaseRelocSym(ctxt *Link, s *sym.Symbol, rt *peBaseRelocTable) { } func addPEBaseReloc(ctxt *Link) { - // We only generate base relocation table for ARM (and ... ARM64), x86, and AMD64 are marked as legacy - // archs and can use fixed base with no base relocation information + // Arm does not work without base relocation table. + // 386 and amd64 will only require the table for BuildModePIE. 
switch ctxt.Arch.Family { default: return + case sys.I386, sys.AMD64: + if ctxt.BuildMode != BuildModePIE { + return + } case sys.ARM: } diff --git a/src/encoding/hex/hex_test.go b/src/encoding/hex/hex_test.go index dbb00b94ca..31e3f68936 100644 --- a/src/encoding/hex/hex_test.go +++ b/src/encoding/hex/hex_test.go @@ -267,7 +267,6 @@ func BenchmarkDecode(b *testing.B) { func BenchmarkDump(b *testing.B) { for _, size := range []int{256, 1024, 4096, 16384} { src := bytes.Repeat([]byte{2, 3, 5, 7, 9, 11, 13, 17}, size/8) - sink = make([]byte, 2*size) b.Run(fmt.Sprintf("%v", size), func(b *testing.B) { b.SetBytes(int64(size)) diff --git a/src/flag/flag.go b/src/flag/flag.go index abf20b6240..eb88c1faa8 100644 --- a/src/flag/flag.go +++ b/src/flag/flag.go @@ -9,9 +9,9 @@ Define flags using flag.String(), Bool(), Int(), etc. - This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + This declares an integer flag, -n, stored in the pointer nFlag, with type *int: import "flag" - var ip = flag.Int("flagname", 1234, "help message for flagname") + var nFlag = flag.Int("n", 1234, "help message for flag n") If you like, you can bind the flag to a variable using the Var() functions. var flagvar int func init() { diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go index 500c98d496..54f9d7b80a 100644 --- a/src/go/parser/interface.go +++ b/src/go/parser/interface.go @@ -133,13 +133,7 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) // first error encountered are returned. 
// func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) { - fd, err := os.Open(path) - if err != nil { - return nil, err - } - defer fd.Close() - - list, err := fd.Readdir(-1) + list, err := ioutil.ReadDir(path) if err != nil { return nil, err } diff --git a/src/go/types/example_test.go b/src/go/types/example_test.go index b8fba7312a..3747f3b15a 100644 --- a/src/go/types/example_test.go +++ b/src/go/types/example_test.go @@ -120,6 +120,9 @@ import "fmt" type Celsius float64 func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) } func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) } + +type S struct { I; m int } +type I interface { m() byte } ` fset := token.NewFileSet() f, err := parser.ParseFile(fset, "celsius.go", input, 0) @@ -147,6 +150,11 @@ func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) } fmt.Println() } + // Print the method set of S. + styp := pkg.Scope().Lookup("S").Type() + fmt.Printf("Method set of %s:\n", styp) + fmt.Println(types.NewMethodSet(styp)) + // Output: // Method set of temperature.Celsius: // method (temperature.Celsius) String() string @@ -154,6 +162,9 @@ func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) } // Method set of *temperature.Celsius: // method (*temperature.Celsius) SetF(f float64) // method (*temperature.Celsius) String() string + // + // Method set of temperature.S: + // MethodSet {} } // ExampleInfo prints various facts recorded by the type checker in a diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go index a236fe2ea8..c34d732b7a 100644 --- a/src/go/types/methodset.go +++ b/src/go/types/methodset.go @@ -99,8 +99,8 @@ func NewMethodSet(T Type) *MethodSet { for len(current) > 0 { var next []embeddedType // embedded types found at current depth - // field and method sets at current depth, allocated lazily - var fset fieldSet + // field and method sets at current depth, indexed 
by names (Id's), and allocated lazily + var fset map[string]bool // we only care about the field names var mset methodSet for _, e := range current { @@ -131,7 +131,10 @@ func NewMethodSet(T Type) *MethodSet { switch t := typ.(type) { case *Struct: for i, f := range t.fields { - fset = fset.add(f, e.multiples) + if fset == nil { + fset = make(map[string]bool) + } + fset[f.Id()] = true // Embedded fields are always of the form T or *T where // T is a type name. If typ appeared multiple times at @@ -156,7 +159,7 @@ func NewMethodSet(T Type) *MethodSet { for k, m := range mset { if _, found := base[k]; !found { // Fields collide with methods of the same name at this depth. - if _, found := fset[k]; found { + if fset[k] { m = nil // collision } if base == nil { @@ -166,17 +169,14 @@ func NewMethodSet(T Type) *MethodSet { } } - // Multiple fields with matching names collide at this depth and shadow all - // entries further down; add them as collisions to base if no entries with - // matching names exist already. - for k, f := range fset { - if f == nil { - if _, found := base[k]; !found { - if base == nil { - base = make(methodSet) - } - base[k] = nil // collision + // Add all (remaining) fields at this depth as collisions (since they will + // hide any method further down) if no entries with matching names exist already. + for k := range fset { + if _, found := base[k]; !found { + if base == nil { + base = make(methodSet) } + base[k] = nil // collision } } @@ -207,33 +207,9 @@ func NewMethodSet(T Type) *MethodSet { return &MethodSet{list} } -// A fieldSet is a set of fields and name collisions. -// A collision indicates that multiple fields with the -// same unique id appeared. -type fieldSet map[string]*Var // a nil entry indicates a name collision - -// Add adds field f to the field set s. -// If multiples is set, f appears multiple times -// and is treated as a collision. 
-func (s fieldSet) add(f *Var, multiples bool) fieldSet { - if s == nil { - s = make(fieldSet) - } - key := f.Id() - // if f is not in the set, add it - if !multiples { - if _, found := s[key]; !found { - s[key] = f - return s - } - } - s[key] = nil // collision - return s -} - // A methodSet is a set of methods and name collisions. // A collision indicates that multiple methods with the -// same unique id appeared. +// same unique id, or a field with that id appeared. type methodSet map[string]*Selection // a nil entry indicates a name collision // Add adds all functions in list to the method set s. diff --git a/src/hash/maphash/maphash_test.go b/src/hash/maphash/maphash_test.go index 0164a9e20a..caea43a8c8 100644 --- a/src/hash/maphash/maphash_test.go +++ b/src/hash/maphash/maphash_test.go @@ -106,6 +106,62 @@ func TestRepeat(t *testing.T) { } } +func TestSeedFromSum64(t *testing.T) { + h1 := new(Hash) + h1.WriteString("foo") + x := h1.Sum64() // seed generated here + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.WriteString("foo") + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + +func TestSeedFromSeed(t *testing.T) { + h1 := new(Hash) + h1.WriteString("foo") + _ = h1.Seed() // seed generated here + x := h1.Sum64() + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.WriteString("foo") + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + +func TestSeedFromFlush(t *testing.T) { + b := make([]byte, 65) + h1 := new(Hash) + h1.Write(b) // seed generated here + x := h1.Sum64() + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.Write(b) + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + +func TestSeedFromReset(t *testing.T) { + h1 := new(Hash) + h1.WriteString("foo") + h1.Reset() // seed generated here + h1.WriteString("foo") + x := h1.Sum64() + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.WriteString("foo") + y := h2.Sum64() + if x != y { + 
t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + // Make sure a Hash implements the hash.Hash and hash.Hash64 interfaces. var _ hash.Hash = &Hash{} var _ hash.Hash64 = &Hash{} diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go index 9ecd8eb004..4c90cd3671 100644 --- a/src/internal/bytealg/bytealg.go +++ b/src/internal/bytealg/bytealg.go @@ -21,3 +21,128 @@ const ( // MaxLen is the maximum length of the string to be searched for (argument b) in Index. var MaxLen int + +// FIXME: the logic of HashStrBytes, HashStrRevBytes, IndexRabinKarpBytes and HashStr, HashStrRev, +// IndexRabinKarp are exactly the same, except that the types are different. Can we eliminate +// three of them without causing allocation? + +// PrimeRK is the prime base used in Rabin-Karp algorithm. +const PrimeRK = 16777619 + +// HashStrBytes returns the hash and the appropriate multiplicative +// factor for use in Rabin-Karp algorithm. +func HashStrBytes(sep []byte) (uint32, uint32) { + hash := uint32(0) + for i := 0; i < len(sep); i++ { + hash = hash*PrimeRK + uint32(sep[i]) + } + var pow, sq uint32 = 1, PrimeRK + for i := len(sep); i > 0; i >>= 1 { + if i&1 != 0 { + pow *= sq + } + sq *= sq + } + return hash, pow +} + +// HashStr returns the hash and the appropriate multiplicative +// factor for use in Rabin-Karp algorithm. +func HashStr(sep string) (uint32, uint32) { + hash := uint32(0) + for i := 0; i < len(sep); i++ { + hash = hash*PrimeRK + uint32(sep[i]) + } + var pow, sq uint32 = 1, PrimeRK + for i := len(sep); i > 0; i >>= 1 { + if i&1 != 0 { + pow *= sq + } + sq *= sq + } + return hash, pow +} + +// HashStrRevBytes returns the hash of the reverse of sep and the +// appropriate multiplicative factor for use in Rabin-Karp algorithm. 
+func HashStrRevBytes(sep []byte) (uint32, uint32) { + hash := uint32(0) + for i := len(sep) - 1; i >= 0; i-- { + hash = hash*PrimeRK + uint32(sep[i]) + } + var pow, sq uint32 = 1, PrimeRK + for i := len(sep); i > 0; i >>= 1 { + if i&1 != 0 { + pow *= sq + } + sq *= sq + } + return hash, pow +} + +// HashStrRev returns the hash of the reverse of sep and the +// appropriate multiplicative factor for use in Rabin-Karp algorithm. +func HashStrRev(sep string) (uint32, uint32) { + hash := uint32(0) + for i := len(sep) - 1; i >= 0; i-- { + hash = hash*PrimeRK + uint32(sep[i]) + } + var pow, sq uint32 = 1, PrimeRK + for i := len(sep); i > 0; i >>= 1 { + if i&1 != 0 { + pow *= sq + } + sq *= sq + } + return hash, pow +} + +// IndexRabinKarpBytes uses the Rabin-Karp search algorithm to return the index of the +// first occurrence of sep in s, or -1 if not present. +func IndexRabinKarpBytes(s, sep []byte) int { + // Rabin-Karp search + hashsep, pow := HashStrBytes(sep) + n := len(sep) + var h uint32 + for i := 0; i < n; i++ { + h = h*PrimeRK + uint32(s[i]) + } + if h == hashsep && Equal(s[:n], sep) { + return 0 + } + for i := n; i < len(s); { + h *= PrimeRK + h += uint32(s[i]) + h -= pow * uint32(s[i-n]) + i++ + if h == hashsep && Equal(s[i-n:i], sep) { + return i - n + } + } + return -1 +} + +// IndexRabinKarp uses the Rabin-Karp search algorithm to return the index of the +// first occurrence of substr in s, or -1 if not present. 
+func IndexRabinKarp(s, substr string) int { + // Rabin-Karp search + hashss, pow := HashStr(substr) + n := len(substr) + var h uint32 + for i := 0; i < n; i++ { + h = h*PrimeRK + uint32(s[i]) + } + if h == hashss && s[:n] == substr { + return 0 + } + for i := n; i < len(s); { + h *= PrimeRK + h += uint32(s[i]) + h -= pow * uint32(s[i-n]) + i++ + if h == hashss && s[i-n:i] == substr { + return i - n + } + } + return -1 +} diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index f326b06332..2829945af0 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -19,13 +19,11 @@ type CacheLinePad struct{ _ [CacheLinePadSize]byte } // so we use the constant per GOARCH CacheLinePadSize as an approximation. var CacheLineSize uintptr = CacheLinePadSize -var X86 x86 - -// The booleans in x86 contain the correspondingly named cpuid feature bit. +// The booleans in X86 contain the correspondingly named cpuid feature bit. // HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. -type x86 struct { +var X86 struct { _ CacheLinePad HasAES bool HasADX bool @@ -46,38 +44,18 @@ type x86 struct { _ CacheLinePad } -var PPC64 ppc64 - -// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (darn, scv), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The booleans in ARM contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. 
-type ppc64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) - _ CacheLinePad -} - -var ARM arm - -// The booleans in arm contain the correspondingly named cpu feature bit. -// The struct is padded to avoid false sharing. -type arm struct { +var ARM struct { _ CacheLinePad HasVFPv4 bool HasIDIVA bool _ CacheLinePad } -var ARM64 arm64 - -// The booleans in arm64 contain the correspondingly named cpu feature bit. +// The booleans in ARM64 contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. -type arm64 struct { +var ARM64 struct { _ CacheLinePad HasFP bool HasASIMD bool @@ -106,9 +84,27 @@ type arm64 struct { _ CacheLinePad } -var S390X s390x +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} -type s390x struct { +// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (darn, scv), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. 
+var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +var S390X struct { _ CacheLinePad HasZARCH bool // z architecture mode is active [mandatory] HasSTFLE bool // store facility list extended [mandatory] diff --git a/src/internal/cpu/cpu_mips64.go b/src/internal/cpu/cpu_mips64.go deleted file mode 100644 index 0f821e44e7..0000000000 --- a/src/internal/cpu/cpu_mips64.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mips64le.go b/src/internal/cpu/cpu_mips64le.go deleted file mode 100644 index 0f821e44e7..0000000000 --- a/src/internal/cpu/cpu_mips64le.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mips64x.go b/src/internal/cpu/cpu_mips64x.go new file mode 100644 index 0000000000..9b0a824ee8 --- /dev/null +++ b/src/internal/cpu/cpu_mips64x.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const CacheLinePadSize = 32 + +// These are initialized by archauxv in runtime/os_linux_mips64x.go. +// These should not be changed after they are initialized. +var HWCap uint + +// HWCAP bits. These are exposed by the Linux kernel 5.4. 
+const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } + + // HWCAP feature bits + MIPS64X.HasMSA = isSet(HWCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/src/internal/cpu/cpu_no_init.go b/src/internal/cpu/cpu_no_init.go index d4b2be8cf4..fb381e1ce2 100644 --- a/src/internal/cpu/cpu_no_init.go +++ b/src/internal/cpu/cpu_no_init.go @@ -9,6 +9,8 @@ // +build !ppc64 // +build !ppc64le // +build !s390x +// +build !mips64 +// +build !mips64le package cpu diff --git a/src/net/http/client.go b/src/net/http/client.go index a496f1c0c7..ec24516833 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -265,6 +265,12 @@ func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, d } return nil, didTimeout, err } + if resp == nil { + return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt) + } + if resp.Body == nil { + return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with a nil Body", rt) + } if !deadline.IsZero() { resp.Body = &cancelTimerBody{ stop: stopTimer, diff --git a/src/net/http/omithttp2.go b/src/net/http/omithttp2.go index 307d93a3b1..7e2f492579 100644 --- a/src/net/http/omithttp2.go +++ b/src/net/http/omithttp2.go @@ -32,7 +32,7 @@ type http2Transport struct { func (*http2Transport) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) } func (*http2Transport) CloseIdleConnections() {} -type http2erringRoundTripper struct{} +type http2erringRoundTripper struct{ err error } func (http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) } diff --git a/src/net/http/transport.go b/src/net/http/transport.go index d0bfdb412c..15feeaf41f 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -569,14 +569,11 @@ func (t *Transport) 
roundTrip(req *Request) (*Response, error) { } // Failed. Clean up and determine whether to retry. - - _, isH2DialError := pconn.alt.(http2erringRoundTripper) - if http2isNoCachedConnError(err) || isH2DialError { + if http2isNoCachedConnError(err) { if t.removeIdleConn(pconn) { t.decConnsPerHost(pconn.cacheKey) } - } - if !pconn.shouldRetryRequest(req, err) { + } else if !pconn.shouldRetryRequest(req, err) { // Issue 16465: return underlying net.Conn.Read error from peek, // as we've historically done. if e, ok := err.(transportReadFromServerError); ok { @@ -1637,7 +1634,12 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" { if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok { - return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: next(cm.targetAddr, pconn.conn.(*tls.Conn))}, nil + alt := next(cm.targetAddr, pconn.conn.(*tls.Conn)) + if e, ok := alt.(http2erringRoundTripper); ok { + // pconn.conn was closed by next (http2configureTransport.upgradeFn). + return nil, e.err + } + return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil } } diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index a505da985c..d26e981ae4 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -557,7 +557,7 @@ func noValidation(_ []byte) error { return nil } // contain a colon. 
func mustHaveFieldNameColon(line []byte) error { if bytes.IndexByte(line, ':') < 0 { - return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q" + string(line))) + return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q", line)) } return nil } diff --git a/src/os/dir_darwin.go b/src/os/dir_darwin.go index a274dd1268..2f9ba78d68 100644 --- a/src/os/dir_darwin.go +++ b/src/os/dir_darwin.go @@ -24,16 +24,6 @@ func (d *dirInfo) close() { d.dir = 0 } -func (f *File) seekInvalidate() { - if f.dirinfo == nil { - return - } - // Free cached dirinfo, so we allocate a new one if we - // access this file as a directory again. See #35767. - f.dirinfo.close() - f.dirinfo = nil -} - func (f *File) readdirnames(n int) (names []string, err error) { if f.dirinfo == nil { dir, call, errno := f.pfd.OpenDir() diff --git a/src/os/dir_unix.go b/src/os/dir_unix.go index 2856a2dc0f..e0c4989756 100644 --- a/src/os/dir_unix.go +++ b/src/os/dir_unix.go @@ -26,8 +26,6 @@ const ( func (d *dirInfo) close() {} -func (f *File) seekInvalidate() {} - func (f *File) readdirnames(n int) (names []string, err error) { // If this file has no dirinfo, create one. if f.dirinfo == nil { diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go index 48bf5f5076..bcf3c625bf 100644 --- a/src/os/file_plan9.go +++ b/src/os/file_plan9.go @@ -290,6 +290,11 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) { // relative to the current offset, and 2 means relative to the end. // It returns the new offset and an error, if any. func (f *File) seek(offset int64, whence int) (ret int64, err error) { + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. 
+ f.dirinfo = nil + } return syscall.Seek(f.fd, offset, whence) } diff --git a/src/os/file_unix.go b/src/os/file_unix.go index 6945937fd6..32e4442e5d 100644 --- a/src/os/file_unix.go +++ b/src/os/file_unix.go @@ -295,7 +295,12 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) { // relative to the current offset, and 2 means relative to the end. // It returns the new offset and an error, if any. func (f *File) seek(offset int64, whence int) (ret int64, err error) { - f.seekInvalidate() + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. + f.dirinfo.close() + f.dirinfo = nil + } ret, err = f.pfd.Seek(offset, whence) runtime.KeepAlive(f) return ret, err diff --git a/src/os/os_test.go b/src/os/os_test.go index 1d8442d808..44e1434dbe 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -1242,6 +1242,41 @@ func testChtimes(t *testing.T, name string) { } } +func TestFileChdir(t *testing.T) { + // TODO(brainman): file.Chdir() is not implemented on windows. + if runtime.GOOS == "windows" { + return + } + + wd, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %s", err) + } + defer Chdir(wd) + + fd, err := Open(".") + if err != nil { + t.Fatalf("Open .: %s", err) + } + defer fd.Close() + + if err := Chdir("/"); err != nil { + t.Fatalf("Chdir /: %s", err) + } + + if err := fd.Chdir(); err != nil { + t.Fatalf("fd.Chdir: %s", err) + } + + wdNew, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %s", err) + } + if wdNew != wd { + t.Fatalf("fd.Chdir failed, got %s, want %s", wdNew, wd) + } +} + func TestChdirAndGetwd(t *testing.T) { // TODO(brainman): file.Chdir() is not implemented on windows. if runtime.GOOS == "windows" { @@ -2461,3 +2496,34 @@ func TestDirSeek(t *testing.T) { } } } + +func TestReaddirSmallSeek(t *testing.T) { + // See issue 37161. Read only one entry from a directory, + // seek to the beginning, and read again. 
We should not see + // duplicate entries. + if runtime.GOOS == "windows" { + testenv.SkipFlaky(t, 36019) + } + wd, err := Getwd() + if err != nil { + t.Fatal(err) + } + df, err := Open(filepath.Join(wd, "testdata", "issue37161")) + if err != nil { + t.Fatal(err) + } + names1, err := df.Readdirnames(1) + if err != nil { + t.Fatal(err) + } + if _, err = df.Seek(0, 0); err != nil { + t.Fatal(err) + } + names2, err := df.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + if len(names2) != 3 { + t.Fatalf("first names: %v, second names: %v", names1, names2) + } +} diff --git a/src/os/testdata/issue37161/a b/src/os/testdata/issue37161/a new file mode 100644 index 0000000000..7898192261 --- /dev/null +++ b/src/os/testdata/issue37161/a @@ -0,0 +1 @@ +a diff --git a/src/os/testdata/issue37161/b b/src/os/testdata/issue37161/b new file mode 100644 index 0000000000..6178079822 --- /dev/null +++ b/src/os/testdata/issue37161/b @@ -0,0 +1 @@ +b diff --git a/src/os/testdata/issue37161/c b/src/os/testdata/issue37161/c new file mode 100644 index 0000000000..f2ad6c76f0 --- /dev/null +++ b/src/os/testdata/issue37161/c @@ -0,0 +1 @@ +c diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index 0be06d124e..6b3d1e779e 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -27,8 +27,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 // if there is a _cgo_init, call it using the gcc ABI. MOVD _cgo_init(SB), R12 - CMP $0, R12 - BEQ nocgo + CBZ R12, nocgo MRS_TPIDR_R0 // load TLS base pointer MOVD R0, R3 // arg 3: TLS base pointer @@ -114,8 +113,7 @@ TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 MOVD ZR, gobuf_ret(R3) // Assert ctxt is zero. See func save. 
MOVD gobuf_ctxt(R3), R0 - CMP $0, R0 - BEQ 2(PC) + CBZ R0, 2(PC) CALL runtime·badctxt(SB) RET @@ -448,8 +446,7 @@ CALLFN(·call1073741824, 1073741832 ) // func memhash32(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 MOVB runtime·useAeshash(SB), R0 - CMP $0, R0 - BEQ noaes + CBZ R0, noaes MOVD p+0(FP), R0 MOVD h+8(FP), R1 MOVD $ret+16(FP), R2 @@ -474,8 +471,7 @@ noaes: // func memhash64(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 MOVB runtime·useAeshash(SB), R0 - CMP $0, R0 - BEQ noaes + CBZ R0, noaes MOVD p+0(FP), R0 MOVD h+8(FP), R1 MOVD $ret+16(FP), R2 @@ -500,8 +496,7 @@ noaes: // func memhash(p unsafe.Pointer, h, size uintptr) uintptr TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 MOVB runtime·useAeshash(SB), R0 - CMP $0, R0 - BEQ noaes + CBZ R0, noaes MOVD p+0(FP), R0 MOVD s+16(FP), R1 MOVD h+8(FP), R3 @@ -513,8 +508,7 @@ noaes: // func strhash(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 MOVB runtime·useAeshash(SB), R0 - CMP $0, R0 - BEQ noaes + CBZ R0, noaes MOVD p+0(FP), R10 // string pointer LDP (R10), (R0, R1) //string data/ length MOVD h+8(FP), R3 @@ -548,8 +542,7 @@ TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0 B aes129plus aes0to15: - CMP $0, R1 - BEQ aes0 + CBZ R1, aes0 VEOR V2.B16, V2.B16, V2.B16 TBZ $3, R1, less_than_8 VLD1.P 8(R0), V2.D[0] @@ -879,8 +872,7 @@ TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0 MOVD $0, (g_sched+gobuf_ret)(g) // Assert ctxt is zero. See func save. MOVD (g_sched+gobuf_ctxt)(g), R0 - CMP $0, R0 - BEQ 2(PC) + CBZ R0, 2(PC) CALL runtime·badctxt(SB) RET @@ -893,8 +885,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20 MOVD arg+8(FP), R0 MOVD RSP, R2 // save original stack pointer - CMP $0, g - BEQ nosave + CBZ g, nosave MOVD g, R4 // Figure out if we need to switch to m->g0 stack. @@ -990,8 +981,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$24-32 // Load g from thread-local storage. 
MOVB runtime·iscgo(SB), R3 - CMP $0, R3 - BEQ nocgo + CBZ R3, nocgo BL runtime·load_g(SB) nocgo: @@ -1000,8 +990,7 @@ nocgo: // In this case, we're running on the thread stack, so there's // lots of space, but the linker doesn't know. Hide the call from // the linker analysis by using an indirect call. - CMP $0, g - BEQ needm + CBZ g, needm MOVD g_m(g), R8 MOVD R8, savedm-8(SP) @@ -1092,8 +1081,7 @@ havem: // If the m on entry was nil, we called needm above to borrow an m // for the duration of the call. Since the call is over, return it with dropm. MOVD savedm-8(SP), R6 - CMP $0, R6 - BNE droppedm + CBNZ R6, droppedm MOVD $runtime·dropm(SB), R0 BL (R0) droppedm: diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go index 56cfb0856e..a09ecd8e42 100644 --- a/src/runtime/crash_cgo_test.go +++ b/src/runtime/crash_cgo_test.go @@ -275,7 +275,13 @@ func testCgoPprof(t *testing.T, buildArg, runArg, top, bottom string) { t.Fatal(err) } - got, err := testenv.CleanCmdEnv(exec.Command(exe, runArg)).CombinedOutput() + // pprofCgoTraceback is called whenever CGO code is executing and a signal + // is received. Disable signal preemption to increase the likelihood at + // least one SIGPROF signal fired to capture a sample. See issue #37201. + cmd := testenv.CleanCmdEnv(exec.Command(exe, runArg)) + cmd.Env = append(cmd.Env, "GODEBUG=asyncpreemptoff=1") + + got, err := cmd.CombinedOutput() if err != nil { if testenv.Builder() == "linux-amd64-alpine" { // See Issue 18243 and Issue 19938. diff --git a/src/runtime/error.go b/src/runtime/error.go index 555befa43d..386569bead 100644 --- a/src/runtime/error.go +++ b/src/runtime/error.go @@ -185,11 +185,6 @@ type stringer interface { String() string } -func typestring(x interface{}) string { - e := efaceOf(&x) - return e._type.string() -} - // printany prints an argument passed to panic. 
// If panic is called with a value that has a String or Error method, // it has already been converted into a string by preprintpanics. @@ -232,7 +227,51 @@ func printany(i interface{}) { case string: print(v) default: - print("(", typestring(i), ") ", i) + printanycustomtype(i) + } +} + +func printanycustomtype(i interface{}) { + eface := efaceOf(&i) + typestring := eface._type.string() + + switch eface._type.kind { + case kindString: + print(typestring, `("`, *(*string)(eface.data), `")`) + case kindBool: + print(typestring, "(", *(*bool)(eface.data), ")") + case kindInt: + print(typestring, "(", *(*int)(eface.data), ")") + case kindInt8: + print(typestring, "(", *(*int8)(eface.data), ")") + case kindInt16: + print(typestring, "(", *(*int16)(eface.data), ")") + case kindInt32: + print(typestring, "(", *(*int32)(eface.data), ")") + case kindInt64: + print(typestring, "(", *(*int64)(eface.data), ")") + case kindUint: + print(typestring, "(", *(*uint)(eface.data), ")") + case kindUint8: + print(typestring, "(", *(*uint8)(eface.data), ")") + case kindUint16: + print(typestring, "(", *(*uint16)(eface.data), ")") + case kindUint32: + print(typestring, "(", *(*uint32)(eface.data), ")") + case kindUint64: + print(typestring, "(", *(*uint64)(eface.data), ")") + case kindUintptr: + print(typestring, "(", *(*uintptr)(eface.data), ")") + case kindFloat32: + print(typestring, "(", *(*float32)(eface.data), ")") + case kindFloat64: + print(typestring, "(", *(*float64)(eface.data), ")") + case kindComplex64: + print(typestring, *(*complex64)(eface.data)) + case kindComplex128: + print(typestring, *(*complex128)(eface.data)) + default: + print("(", typestring, ") ", eface.data) } } diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 05de282aa7..e4b0b6d3d3 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -331,8 +331,11 @@ func convT2E(t *_type, elem unsafe.Pointer) (e eface) { } func convT16(val uint16) (x unsafe.Pointer) { - if val == 0 { - x = 
unsafe.Pointer(&zeroVal[0]) + if val < uint16(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + if sys.BigEndian { + x = add(x, 6) + } } else { x = mallocgc(2, uint16Type, false) *(*uint16)(x) = val @@ -341,8 +344,11 @@ func convT16(val uint16) (x unsafe.Pointer) { } func convT32(val uint32) (x unsafe.Pointer) { - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) + if val < uint32(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + if sys.BigEndian { + x = add(x, 4) + } } else { x = mallocgc(4, uint32Type, false) *(*uint32)(x) = val @@ -351,8 +357,8 @@ func convT32(val uint32) (x unsafe.Pointer) { } func convT64(val uint64) (x unsafe.Pointer) { - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) + if val < uint64(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) } else { x = mallocgc(8, uint64Type, false) *(*uint64)(x) = val @@ -521,8 +527,8 @@ func iterate_itabs(fn func(*itab)) { } } -// staticbytes is used to avoid convT2E for byte-sized values. -var staticbytes = [...]byte{ +// staticuint64s is used to avoid allocating in convTx for small integer values. 
+var staticuint64s = [...]uint64{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, diff --git a/src/runtime/iface_test.go b/src/runtime/iface_test.go index 6d8f8614d9..4fab6c968a 100644 --- a/src/runtime/iface_test.go +++ b/src/runtime/iface_test.go @@ -95,6 +95,19 @@ func BenchmarkNeIfaceConcrete(b *testing.B) { } } +func BenchmarkConvT2EByteSized(b *testing.B) { + b.Run("bool", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = yes + } + }) + b.Run("uint8", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = eight8 + } + }) +} + func BenchmarkConvT2ESmall(b *testing.B) { for i := 0; i < b.N; i++ { e = ts @@ -310,18 +323,22 @@ func TestZeroConvT2x(t *testing.T) { var ( eight8 uint8 = 8 eight8I T8 = 8 + yes bool = true - zero16 uint16 = 0 - zero16I T16 = 0 - one16 uint16 = 1 + zero16 uint16 = 0 + zero16I T16 = 0 + one16 uint16 = 1 + thousand16 uint16 = 1000 - zero32 uint32 = 0 - zero32I T32 = 0 - one32 uint32 = 1 + zero32 uint32 = 0 + zero32I T32 = 0 + one32 uint32 = 1 + thousand32 uint32 = 1000 - zero64 uint64 = 0 - zero64I T64 = 0 - one64 uint64 = 1 + zero64 uint64 = 0 + zero64I T64 = 0 + one64 uint64 = 1 + thousand64 uint64 = 1000 zerostr string = "" zerostrI Tstr = "" @@ -369,21 +386,6 @@ func BenchmarkConvT2Ezero(b *testing.B) { }) }) b.Run("nonzero", func(b *testing.B) { - b.Run("16", func(b *testing.B) { - for i := 0; i < b.N; i++ { - e = one16 - } - }) - b.Run("32", func(b *testing.B) { - for i := 0; i < b.N; i++ { - e = one32 - } - }) - b.Run("64", func(b *testing.B) { - for i := 0; i < b.N; i++ { - e = one64 - } - }) b.Run("str", func(b *testing.B) { for i := 0; i < b.N; i++ { e = nzstr @@ -400,4 +402,38 @@ func BenchmarkConvT2Ezero(b *testing.B) { } }) }) + b.Run("smallint", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one16 + } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = 
one32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one64 + } + }) + }) + b.Run("largeint", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand16 + } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand64 + } + }) + }) } diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index c26406e55f..35ed42871f 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -382,6 +382,7 @@ func genMIPS(_64bit bool) { sub := "SUB" r28 := "R28" regsize := 4 + softfloat := "GOMIPS_softfloat" if _64bit { mov = "MOVV" movf = "MOVD" @@ -389,6 +390,7 @@ func genMIPS(_64bit bool) { sub = "SUBV" r28 = "RSB" regsize = 8 + softfloat = "GOMIPS64_softfloat" } // Add integer registers R1-R22, R24-R25, R28 @@ -411,28 +413,36 @@ func genMIPS(_64bit bool) { mov+" LO, R1\n"+mov+" R1, %d(R29)", mov+" %d(R29), R1\n"+mov+" R1, LO", regsize) + // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) - l.addSpecial( + var lfp = layout{sp: "R29", stack: l.stack} + lfp.addSpecial( mov+" FCR31, R1\n"+mov+" R1, %d(R29)", mov+" %d(R29), R1\n"+mov+" R1, FCR31", regsize) // Add floating point registers F0-F31. 
for i := 0; i <= 31; i++ { reg := fmt.Sprintf("F%d", i) - l.add(movf, reg, regsize) + lfp.add(movf, reg, regsize) } // allocate frame, save PC of interrupted instruction (in LR) - p(mov+" R31, -%d(R29)", l.stack) - p(sub+" $%d, R29", l.stack) + p(mov+" R31, -%d(R29)", lfp.stack) + p(sub+" $%d, R29", lfp.stack) l.save() + p("#ifndef %s", softfloat) + lfp.save() + p("#endif") p("CALL ·asyncPreempt2(SB)") + p("#ifndef %s", softfloat) + lfp.restore() + p("#endif") l.restore() - p(mov+" %d(R29), R31", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it - p(mov + " (R29), R23") // load PC to REGTMP - p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) + p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p(mov + " (R29), R23") // load PC to REGTMP + p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) p("JMP (R23)") } diff --git a/src/runtime/os_linux_mips64x.go b/src/runtime/os_linux_mips64x.go index 464a26a8a4..4ff66f9538 100644 --- a/src/runtime/os_linux_mips64x.go +++ b/src/runtime/os_linux_mips64x.go @@ -7,7 +7,13 @@ package runtime +import "internal/cpu" + func archauxv(tag, val uintptr) { + switch tag { + case _AT_HWCAP: + cpu.HWCap = uint(val) + } } func osArchInit() {} diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index bddc25729a..7576565599 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -1031,7 +1031,11 @@ func ctrlhandler1(_type uint32) uint32 { if sigsend(s) { return 1 } - exit(2) // SIGINT, SIGTERM, etc + if !islibrary && !isarchive { + // Only exit the program if we don't have a DLL. + // See https://golang.org/issues/35965. 
+ exit(2) // SIGINT, SIGTERM, etc + } return 0 } diff --git a/src/runtime/panic_test.go b/src/runtime/panic_test.go new file mode 100644 index 0000000000..45ffa9858b --- /dev/null +++ b/src/runtime/panic_test.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "strings" + "testing" +) + +// Test that panics print out the underlying value +// when the underlying kind is directly printable. +// Issue: https://golang/go/issues/37531 +func TestPanicWithDirectlyPrintableCustomTypes(t *testing.T) { + tests := []struct { + name string + wantPanicPrefix string + }{ + {"panicCustomBool", `panic: main.MyBool(true)`}, + {"panicCustomComplex128", `panic: main.MyComplex128(+3.210000e+001+1.000000e+001i)`}, + {"panicCustomComplex64", `panic: main.MyComplex64(+1.100000e-001+3.000000e+000i)`}, + {"panicCustomFloat32", `panic: main.MyFloat32(-9.370000e+001)`}, + {"panicCustomFloat64", `panic: main.MyFloat64(-9.370000e+001)`}, + {"panicCustomInt", `panic: main.MyInt(93)`}, + {"panicCustomInt8", `panic: main.MyInt8(93)`}, + {"panicCustomInt16", `panic: main.MyInt16(93)`}, + {"panicCustomInt32", `panic: main.MyInt32(93)`}, + {"panicCustomInt64", `panic: main.MyInt64(93)`}, + {"panicCustomString", `panic: main.MyString("Panic")`}, + {"panicCustomUint", `panic: main.MyUint(93)`}, + {"panicCustomUint8", `panic: main.MyUint8(93)`}, + {"panicCustomUint16", `panic: main.MyUint16(93)`}, + {"panicCustomUint32", `panic: main.MyUint32(93)`}, + {"panicCustomUint64", `panic: main.MyUint64(93)`}, + {"panicCustomUintptr", `panic: main.MyUintptr(93)`}, + } + + for _, tt := range tests { + t := t + t.Run(tt.name, func(t *testing.T) { + output := runTestProg(t, "testprog", tt.name) + if !strings.HasPrefix(output, tt.wantPanicPrefix) { + t.Fatalf("%q\nis not present in\n%s", tt.wantPanicPrefix, output) + } + }) + } +} diff --git 
a/src/runtime/pprof/internal/profile/proto.go b/src/runtime/pprof/internal/profile/proto.go index 294acfeb92..52cf1ef2b3 100644 --- a/src/runtime/pprof/internal/profile/proto.go +++ b/src/runtime/pprof/internal/profile/proto.go @@ -21,7 +21,10 @@ package profile -import "errors" +import ( + "errors" + "fmt" +) type buffer struct { field int @@ -232,7 +235,7 @@ func decodeField(b *buffer, data []byte) ([]byte, error) { b.u64 = uint64(le32(data[:4])) data = data[4:] default: - return nil, errors.New("unknown type: " + string(rune(b.typ))) + return nil, fmt.Errorf("unknown wire type: %d", b.typ) } return data, nil diff --git a/src/runtime/pprof/map.go b/src/runtime/pprof/map.go index a271ad022e..7c75872351 100644 --- a/src/runtime/pprof/map.go +++ b/src/runtime/pprof/map.go @@ -68,7 +68,8 @@ Search: if len(m.freeStk) < len(stk) { m.freeStk = make([]uintptr, 1024) } - e.stk = m.freeStk[:len(stk)] + // Limit cap to prevent append from clobbering freeStk. + e.stk = m.freeStk[:len(stk):len(stk)] m.freeStk = m.freeStk[len(stk):] for j := range stk { diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 20b44e1e01..5bfc3b6134 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -1172,16 +1172,25 @@ func TestTryAdd(t *testing.T) { {Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}}, }, }, { - name: "recursive_inlined_funcs", + // If a function is called recursively then it must not be + // inlined in the caller. + // + // N.B. We're generating an impossible profile here, with a + // recursive inlineCallee call. This is simulating a non-Go + // function that looks like an inlined Go function other than + // its recursive property. See pcDeck.tryAdd. + name: "recursive_func_is_not_inlined", input: []uint64{ 3, 0, 500, // hz = 500. Must match the period. 
5, 0, 30, inlinedCalleePtr, inlinedCalleePtr, 4, 0, 40, inlinedCalleePtr, }, - wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}}, + // inlinedCaller shows up here because + // runtime_expandFinalInlineFrame adds it to the stack frame. + wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}}, wantSamples: []*profile.Sample{ - {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}}, - {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}}, + {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}}, + {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}}, }, }, { name: "truncated_stack_trace_later", @@ -1202,12 +1211,36 @@ func TestTryAdd(t *testing.T) { 4, 0, 70, inlinedCalleePtr, 5, 0, 80, inlinedCalleePtr, inlinedCallerPtr, }, - wantLocs: [][]string{ // the inline info is screwed up, but better than a crash. - {"runtime/pprof.inlinedCallee"}, + wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, + {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // We can recover the inlined caller from a truncated stack. + name: "truncated_stack_trace_only", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. + 4, 0, 70, inlinedCalleePtr, + }, + wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // The same location is used for duplicated stacks. + name: "truncated_stack_trace_twice", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. 
+ 4, 0, 70, inlinedCalleePtr, + 5, 0, 80, inlinedCallerPtr, inlinedCalleePtr, + }, + wantLocs: [][]string{ + {"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, {"runtime/pprof.inlinedCaller"}}, wantSamples: []*profile.Sample{ {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, - {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}}, + {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}}, }, }} diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go index 8a30c7151d..416ace7ab2 100644 --- a/src/runtime/pprof/proto.go +++ b/src/runtime/pprof/proto.go @@ -384,6 +384,10 @@ func (b *profileBuilder) build() { // It may emit to b.pb, so there must be no message encoding in progress. func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) { b.deck.reset() + + // The last frame might be truncated. Recover lost inline frames. + stk = runtime_expandFinalInlineFrame(stk) + for len(stk) > 0 { addr := stk[0] if l, ok := b.locs[addr]; ok { @@ -395,22 +399,12 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo // then, record the cached location. locs = append(locs, l.id) - // The stk may be truncated due to the stack depth limit - // (e.g. See maxStack and maxCPUProfStack in runtime) or - // bugs in runtime. Avoid the crash in either case. - // TODO(hyangah): The correct fix may require using the exact - // pcs as the key for b.locs cache management instead of just - // relying on the very first pc. We are late in the go1.14 dev - // cycle, so this is a workaround with little code change. - if len(l.pcs) > len(stk) { - stk = nil - // TODO(hyangah): would be nice if we can enable - // debug print out on demand and report the problematic - // cached location entry and stack traces. Do we already - // have such facility to utilize (e.g. GODEBUG)? - } else { - stk = stk[len(l.pcs):] // skip the matching pcs. 
- } + // Skip the matching pcs. + // + // Even if stk was truncated due to the stack depth + // limit, expandFinalInlineFrame above has already + // fixed the truncation, ensuring it is long enough. + stk = stk[len(l.pcs):] continue } @@ -427,9 +421,9 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo stk = stk[1:] continue } - // add failed because this addr is not inlined with - // the existing PCs in the deck. Flush the deck and retry to - // handle this pc. + // add failed because this addr is not inlined with the + // existing PCs in the deck. Flush the deck and retry handling + // this pc. if id := b.emitLocation(); id > 0 { locs = append(locs, id) } @@ -463,8 +457,8 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo // the fake pcs and restore the inlined and entry functions. Inlined functions // have the following properties: // Frame's Func is nil (note: also true for non-Go functions), and -// Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions), -// Frame's Name does not match its entry function frame's name. +// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and +// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive). // // As reading and processing the pcs in a stack trace one by one (from leaf to the root), // we use pcDeck to temporarily hold the observed pcs and their expanded frames @@ -486,8 +480,8 @@ func (d *pcDeck) reset() { // to the deck. If it fails the caller needs to flush the deck and retry. func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) { if existing := len(d.pcs); existing > 0 { - // 'frames' are all expanded from one 'pc' and represent all inlined functions - // so we check only the last one. 
+ // 'd.frames' are all expanded from one 'pc' and represent all + // inlined functions so we check only the last one. newFrame := frames[0] last := d.frames[existing-1] if last.Func != nil { // the last frame can't be inlined. Flush. diff --git a/src/runtime/pprof/runtime.go b/src/runtime/pprof/runtime.go index b71bbad9a6..dd2545b339 100644 --- a/src/runtime/pprof/runtime.go +++ b/src/runtime/pprof/runtime.go @@ -9,6 +9,9 @@ import ( "unsafe" ) +// runtime_expandFinalInlineFrame is defined in runtime/symtab.go. +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr + // runtime_setProfLabel is defined in runtime/proflabel.go. func runtime_setProfLabel(labels unsafe.Pointer) diff --git a/src/runtime/preempt_mips64x.s b/src/runtime/preempt_mips64x.s index 8048a87cd3..1e123e8077 100644 --- a/src/runtime/preempt_mips64x.s +++ b/src/runtime/preempt_mips64x.s @@ -37,6 +37,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVV R1, 208(R29) MOVV LO, R1 MOVV R1, 216(R29) + #ifndef GOMIPS64_softfloat MOVV FCR31, R1 MOVV R1, 224(R29) MOVD F0, 232(R29) @@ -71,7 +72,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD F29, 464(R29) MOVD F30, 472(R29) MOVD F31, 480(R29) + #endif CALL ·asyncPreempt2(SB) + #ifndef GOMIPS64_softfloat MOVD 480(R29), F31 MOVD 472(R29), F30 MOVD 464(R29), F29 @@ -106,6 +109,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD 232(R29), F0 MOVV 224(R29), R1 MOVV R1, FCR31 + #endif MOVV 216(R29), R1 MOVV R1, LO MOVV 208(R29), R1 diff --git a/src/runtime/preempt_mipsx.s b/src/runtime/preempt_mipsx.s index 840e861497..afac33e0a0 100644 --- a/src/runtime/preempt_mipsx.s +++ b/src/runtime/preempt_mipsx.s @@ -37,6 +37,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVW R1, 104(R29) MOVW LO, R1 MOVW R1, 108(R29) + #ifndef GOMIPS_softfloat MOVW FCR31, R1 MOVW R1, 112(R29) MOVF F0, 116(R29) @@ -71,7 +72,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVF F29, 232(R29) MOVF F30, 236(R29) MOVF F31, 240(R29) + #endif CALL ·asyncPreempt2(SB) + #ifndef 
GOMIPS_softfloat MOVF 240(R29), F31 MOVF 236(R29), F30 MOVF 232(R29), F29 @@ -106,6 +109,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVF 116(R29), F0 MOVW 112(R29), R1 MOVW R1, FCR31 + #endif MOVW 108(R29), R1 MOVW R1, LO MOVW 104(R29), R1 diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s index 46224f8d73..9b909ac021 100644 --- a/src/runtime/race_arm64.s +++ b/src/runtime/race_arm64.s @@ -421,8 +421,7 @@ TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 // First, code below assumes that we are on curg, while raceGetProcCmd // can be executed on g0. Second, it is called frequently, so will // benefit from this fast path. - CMP $0, R0 - BNE rest + CBNZ R0, rest MOVD g, R13 load_g MOVD g_m(g), R0 diff --git a/src/runtime/rt0_linux_arm64.s b/src/runtime/rt0_linux_arm64.s index a6bc99df56..f48a8d6190 100644 --- a/src/runtime/rt0_linux_arm64.s +++ b/src/runtime/rt0_linux_arm64.s @@ -44,8 +44,7 @@ TEXT _rt0_arm64_linux_lib(SB),NOSPLIT,$184 // Create a new thread to do the runtime initialization and return. MOVD _cgo_sys_thread_create(SB), R4 - CMP $0, R4 - BEQ nocgo + CBZ R4, nocgo MOVD $_rt0_arm64_linux_lib_go(SB), R0 MOVD $0, R1 SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index d2e6693805..32b192c977 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -607,6 +607,30 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { print("signal arrived during cgo execution\n") gp = _g_.m.lockedg.ptr() } + if sig == _SIGILL { + // It would be nice to know how long the instruction is. + // Unfortunately, that's complicated to do in general (mostly for x86 + // and s930x, but other archs have non-standard instruction lengths also). + // Opt to print 16 bytes, which covers most instructions. + const maxN = 16 + n := uintptr(maxN) + // We have to be careful, though. 
If we're near the end of + // a page and the following page isn't mapped, we could + // segfault. So make sure we don't straddle a page (even though + // that could lead to printing an incomplete instruction). + // We're assuming here we can read at least the page containing the PC. + // I suppose it is possible that the page is mapped executable but not readable? + pc := c.sigpc() + if n > physPageSize-pc%physPageSize { + n = physPageSize - pc%physPageSize + } + print("instruction bytes:") + b := (*[maxN]byte)(unsafe.Pointer(pc)) + for i := uintptr(0); i < n; i++ { + print(" ", hex(b[i])) + } + println() + } print("\n") level, _, docrash := gotraceback() diff --git a/src/runtime/signal_windows_test.go b/src/runtime/signal_windows_test.go index 9748403412..f99857193c 100644 --- a/src/runtime/signal_windows_test.go +++ b/src/runtime/signal_windows_test.go @@ -3,6 +3,9 @@ package runtime_test import ( + "bufio" + "bytes" + "fmt" "internal/testenv" "io/ioutil" "os" @@ -10,6 +13,7 @@ import ( "path/filepath" "runtime" "strings" + "syscall" "testing" ) @@ -59,3 +63,90 @@ func TestVectoredHandlerDontCrashOnLibrary(t *testing.T) { t.Errorf("expected output %q, got %q", expectedOutput, cleanedOut) } } + +func sendCtrlBreak(pid int) error { + kernel32, err := syscall.LoadDLL("kernel32.dll") + if err != nil { + return fmt.Errorf("LoadDLL: %v\n", err) + } + generateEvent, err := kernel32.FindProc("GenerateConsoleCtrlEvent") + if err != nil { + return fmt.Errorf("FindProc: %v\n", err) + } + result, _, err := generateEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(pid)) + if result == 0 { + return fmt.Errorf("GenerateConsoleCtrlEvent: %v\n", err) + } + return nil +} + +// TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events. +// See https://golang.org/issues/35965. 
+func TestLibraryCtrlHandler(t *testing.T) { + if *flagQuick { + t.Skip("-quick") + } + if runtime.GOARCH != "amd64" { + t.Skip("this test can only run on windows/amd64") + } + testenv.MustHaveGoBuild(t) + testenv.MustHaveExecPath(t, "gcc") + testprog.Lock() + defer testprog.Unlock() + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + // build go dll + dll := filepath.Join(dir, "dummy.dll") + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dll, "--buildmode", "c-shared", "testdata/testwinlibsignal/dummy.go") + out, err := testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("failed to build go library: %s\n%s", err, out) + } + + // build c program + exe := filepath.Join(dir, "test.exe") + cmd = exec.Command("gcc", "-o", exe, "testdata/testwinlibsignal/main.c") + out, err = testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("failed to build c exe: %s\n%s", err, out) + } + + // run test program + cmd = exec.Command(exe) + var stderr bytes.Buffer + cmd.Stderr = &stderr + outPipe, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("Failed to create stdout pipe: %v", err) + } + outReader := bufio.NewReader(outPipe) + + cmd.SysProcAttr = &syscall.SysProcAttr{ + CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, + } + if err := cmd.Start(); err != nil { + t.Fatalf("Start failed: %v", err) + } + + errCh := make(chan error, 1) + go func() { + if line, err := outReader.ReadString('\n'); err != nil { + errCh <- fmt.Errorf("could not read stdout: %v", err) + } else if strings.TrimSpace(line) != "ready" { + errCh <- fmt.Errorf("unexpected message: %v", line) + } else { + errCh <- sendCtrlBreak(cmd.Process.Pid) + } + }() + + if err := <-errCh; err != nil { + t.Fatal(err) + } + if err := cmd.Wait(); err != nil { + t.Fatalf("Program exited with error: %v\n%s", err, &stderr) + } +} diff --git a/src/runtime/softfloat64.go 
b/src/runtime/softfloat64.go index 8fde0feddc..13bee6c1d7 100644 --- a/src/runtime/softfloat64.go +++ b/src/runtime/softfloat64.go @@ -13,7 +13,7 @@ const ( expbits64 uint = 11 bias64 = -1<<(expbits64-1) + 1 - nan64 uint64 = (1<= 0 && v < runeSelf { - stringStructOf(&s).str = unsafe.Pointer(&staticbytes[v]) - stringStructOf(&s).len = 1 - return - } - var b []byte if buf != nil { b = buf[:] diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index a6e08d7214..997cfa3f7a 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -148,6 +148,59 @@ func (ci *Frames) Next() (frame Frame, more bool) { return } +// runtime_expandFinalInlineFrame expands the final pc in stk to include all +// "callers" if pc is inline. +// +//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr { + pc := stk[len(stk)-1] + tracepc := pc - 1 + + f := findfunc(tracepc) + if !f.valid() { + // Not a Go function. + return stk + } + + inldata := funcdata(f, _FUNCDATA_InlTree) + if inldata == nil { + // Nothing inline in f. + return stk + } + + // Treat the previous func as normal. We haven't actually checked, but + // since this pc was included in the stack, we know it shouldn't be + // elided. + lastFuncID := funcID_normal + + // Remove pc from stk; we'll re-add it below. + stk = stk[:len(stk)-1] + + // See inline expansion in gentraceback. + var cache pcvalueCache + inltree := (*[1 << 20]inlinedCall)(inldata) + for { + ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache) + if ix < 0 { + break + } + if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) { + // ignore wrappers + } else { + stk = append(stk, pc) + } + lastFuncID = inltree[ix].funcID + // Back up to an instruction in the "caller". + tracepc = f.entry + uintptr(inltree[ix].parentPc) + pc = tracepc + 1 + } + + // N.B. we want to keep the last parentPC which is not inline. 
+ stk = append(stk, pc) + + return stk +} + // expandCgoFrames expands frame information for pc, known to be // a non-Go function, using the cgoSymbolizer hook. expandCgoFrames // returns nil if pc could not be expanded. diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s index b9588cec30..b23e3b9a11 100644 --- a/src/runtime/sys_linux_arm64.s +++ b/src/runtime/sys_linux_arm64.s @@ -419,8 +419,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192 // first save R0, because runtime·load_g will clobber it MOVW R0, 8(RSP) MOVBU runtime·iscgo(SB), R0 - CMP $0, R0 - BEQ 2(PC) + CBZ R0, 2(PC) BL runtime·load_g(SB) MOVD R1, 16(RSP) diff --git a/src/runtime/testdata/testprog/panicprint.go b/src/runtime/testdata/testprog/panicprint.go new file mode 100644 index 0000000000..c8deabe2ab --- /dev/null +++ b/src/runtime/testdata/testprog/panicprint.go @@ -0,0 +1,111 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +type MyBool bool +type MyComplex128 complex128 +type MyComplex64 complex64 +type MyFloat32 float32 +type MyFloat64 float64 +type MyInt int +type MyInt8 int8 +type MyInt16 int16 +type MyInt32 int32 +type MyInt64 int64 +type MyString string +type MyUint uint +type MyUint8 uint8 +type MyUint16 uint16 +type MyUint32 uint32 +type MyUint64 uint64 +type MyUintptr uintptr + +func panicCustomComplex64() { + panic(MyComplex64(0.11 + 3i)) +} + +func panicCustomComplex128() { + panic(MyComplex128(32.1 + 10i)) +} + +func panicCustomString() { + panic(MyString("Panic")) +} + +func panicCustomBool() { + panic(MyBool(true)) +} + +func panicCustomInt() { + panic(MyInt(93)) +} + +func panicCustomInt8() { + panic(MyInt8(93)) +} + +func panicCustomInt16() { + panic(MyInt16(93)) +} + +func panicCustomInt32() { + panic(MyInt32(93)) +} + +func panicCustomInt64() { + panic(MyInt64(93)) +} + +func panicCustomUint() { + panic(MyUint(93)) +} + +func panicCustomUint8() { + panic(MyUint8(93)) +} + +func panicCustomUint16() { + panic(MyUint16(93)) +} + +func panicCustomUint32() { + panic(MyUint32(93)) +} + +func panicCustomUint64() { + panic(MyUint64(93)) +} + +func panicCustomUintptr() { + panic(MyUintptr(93)) +} + +func panicCustomFloat64() { + panic(MyFloat64(-93.70)) +} + +func panicCustomFloat32() { + panic(MyFloat32(-93.70)) +} + +func init() { + register("panicCustomComplex64", panicCustomComplex64) + register("panicCustomComplex128", panicCustomComplex128) + register("panicCustomBool", panicCustomBool) + register("panicCustomFloat32", panicCustomFloat32) + register("panicCustomFloat64", panicCustomFloat64) + register("panicCustomInt", panicCustomInt) + register("panicCustomInt8", panicCustomInt8) + register("panicCustomInt16", panicCustomInt16) + register("panicCustomInt32", panicCustomInt32) + register("panicCustomInt64", panicCustomInt64) + register("panicCustomString", panicCustomString) + register("panicCustomUint", panicCustomUint) + register("panicCustomUint8", 
panicCustomUint8) + register("panicCustomUint16", panicCustomUint16) + register("panicCustomUint32", panicCustomUint32) + register("panicCustomUint64", panicCustomUint64) + register("panicCustomUintptr", panicCustomUintptr) +} diff --git a/src/runtime/testdata/testwinlibsignal/dummy.go b/src/runtime/testdata/testwinlibsignal/dummy.go new file mode 100644 index 0000000000..82dfd91c93 --- /dev/null +++ b/src/runtime/testdata/testwinlibsignal/dummy.go @@ -0,0 +1,10 @@ +// +build windows + +package main + +//export Dummy +func Dummy() int { + return 42 +} + +func main() {} diff --git a/src/runtime/testdata/testwinlibsignal/main.c b/src/runtime/testdata/testwinlibsignal/main.c new file mode 100644 index 0000000000..1787fef3b9 --- /dev/null +++ b/src/runtime/testdata/testwinlibsignal/main.c @@ -0,0 +1,50 @@ +#include +#include + +HANDLE waitForCtrlBreakEvent; + +BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) +{ + switch (fdwCtrlType) + { + case CTRL_BREAK_EVENT: + SetEvent(waitForCtrlBreakEvent); + return TRUE; + default: + return FALSE; + } +} + +int main(void) +{ + waitForCtrlBreakEvent = CreateEvent(NULL, TRUE, FALSE, NULL); + if (!waitForCtrlBreakEvent) { + fprintf(stderr, "ERROR: Could not create event"); + return 1; + } + + if (!SetConsoleCtrlHandler(CtrlHandler, TRUE)) + { + fprintf(stderr, "ERROR: Could not set control handler"); + return 1; + } + + // The library must be loaded after the SetConsoleCtrlHandler call + // so that the library handler registers after the main program. + // This way the library handler gets called first. 
+ HMODULE dummyDll = LoadLibrary("dummy.dll"); + if (!dummyDll) { + fprintf(stderr, "ERROR: Could not load dummy.dll"); + return 1; + } + + printf("ready\n"); + fflush(stdout); + + if (WaitForSingleObject(waitForCtrlBreakEvent, 5000) != WAIT_OBJECT_0) { + fprintf(stderr, "FAILURE: No signal received"); + return 1; + } + + return 0; +} diff --git a/src/runtime/tls_arm64.s b/src/runtime/tls_arm64.s index fb8627db29..999914d655 100644 --- a/src/runtime/tls_arm64.s +++ b/src/runtime/tls_arm64.s @@ -10,8 +10,7 @@ TEXT runtime·load_g(SB),NOSPLIT,$0 MOVB runtime·iscgo(SB), R0 - CMP $0, R0 - BEQ nocgo + CBZ R0, nocgo MRS_TPIDR_R0 #ifdef GOOS_darwin @@ -27,8 +26,7 @@ nocgo: TEXT runtime·save_g(SB),NOSPLIT,$0 MOVB runtime·iscgo(SB), R0 - CMP $0, R0 - BEQ nocgo + CBZ R0, nocgo MRS_TPIDR_R0 #ifdef GOOS_darwin diff --git a/src/strings/strings.go b/src/strings/strings.go index 238d657f61..7fb05b7d0e 100644 --- a/src/strings/strings.go +++ b/src/strings/strings.go @@ -36,43 +36,6 @@ func explode(s string, n int) []string { return a } -// primeRK is the prime base used in Rabin-Karp algorithm. -const primeRK = 16777619 - -// hashStr returns the hash and the appropriate multiplicative -// factor for use in Rabin-Karp algorithm. -func hashStr(sep string) (uint32, uint32) { - hash := uint32(0) - for i := 0; i < len(sep); i++ { - hash = hash*primeRK + uint32(sep[i]) - } - var pow, sq uint32 = 1, primeRK - for i := len(sep); i > 0; i >>= 1 { - if i&1 != 0 { - pow *= sq - } - sq *= sq - } - return hash, pow -} - -// hashStrRev returns the hash of the reverse of sep and the -// appropriate multiplicative factor for use in Rabin-Karp algorithm. 
-func hashStrRev(sep string) (uint32, uint32) { - hash := uint32(0) - for i := len(sep) - 1; i >= 0; i-- { - hash = hash*primeRK + uint32(sep[i]) - } - var pow, sq uint32 = 1, primeRK - for i := len(sep); i > 0; i >>= 1 { - if i&1 != 0 { - pow *= sq - } - sq *= sq - } - return hash, pow -} - // Count counts the number of non-overlapping instances of substr in s. // If substr is an empty string, Count returns 1 + the number of Unicode code points in s. func Count(s, substr string) int { @@ -126,17 +89,17 @@ func LastIndex(s, substr string) int { return -1 } // Rabin-Karp search from the end of the string - hashss, pow := hashStrRev(substr) + hashss, pow := bytealg.HashStrRev(substr) last := len(s) - n var h uint32 for i := len(s) - 1; i >= last; i-- { - h = h*primeRK + uint32(s[i]) + h = h*bytealg.PrimeRK + uint32(s[i]) } if h == hashss && s[last:] == substr { return last } for i := last - 1; i >= 0; i-- { - h *= primeRK + h *= bytealg.PrimeRK h += uint32(s[i]) h -= pow * uint32(s[i+n]) if h == hashss && s[i:i+n] == substr { @@ -1095,7 +1058,7 @@ func Index(s, substr string) int { fails++ if fails >= 4+i>>4 && i < t { // See comment in ../bytes/bytes.go. 
- j := indexRabinKarp(s[i:], substr) + j := bytealg.IndexRabinKarp(s[i:], substr) if j < 0 { return -1 } @@ -1104,26 +1067,3 @@ func Index(s, substr string) int { } return -1 } - -func indexRabinKarp(s, substr string) int { - // Rabin-Karp search - hashss, pow := hashStr(substr) - n := len(substr) - var h uint32 - for i := 0; i < n; i++ { - h = h*primeRK + uint32(s[i]) - } - if h == hashss && s[:n] == substr { - return 0 - } - for i := n; i < len(s); { - h *= primeRK - h += uint32(s[i]) - h -= pow * uint32(s[i-n]) - i++ - if h == hashss && s[i-n:i] == substr { - return i - n - } - } - return -1 -} diff --git a/src/syscall/fs_js.go b/src/syscall/fs_js.go index c1cac97d91..262ec28afd 100644 --- a/src/syscall/fs_js.go +++ b/src/syscall/fs_js.go @@ -102,6 +102,10 @@ func Open(path string, openmode int, perm uint32) (int, error) { } } + if path[0] != '/' { + cwd := jsProcess.Call("cwd").String() + path = cwd + "/" + path + } f := &jsFile{ path: path, entries: entries, diff --git a/src/syscall/js/func.go b/src/syscall/js/func.go index 6c145c9da6..9e99027e9f 100644 --- a/src/syscall/js/func.go +++ b/src/syscall/js/func.go @@ -22,17 +22,22 @@ type Func struct { id uint32 } -// FuncOf returns a wrapped function. +// FuncOf returns a function to be used by JavaScript. // -// Invoking the JavaScript function will synchronously call the Go function fn with the value of JavaScript's -// "this" keyword and the arguments of the invocation. -// The return value of the invocation is the result of the Go function mapped back to JavaScript according to ValueOf. +// The Go function fn is called with the value of JavaScript's "this" keyword and the +// arguments of the invocation. The return value of the invocation is +// the result of the Go function mapped back to JavaScript according to ValueOf. // -// A wrapped function triggered during a call from Go to JavaScript gets executed on the same goroutine. 
-// A wrapped function triggered by JavaScript's event loop gets executed on an extra goroutine. -// Blocking operations in the wrapped function will block the event loop. -// As a consequence, if one wrapped function blocks, other wrapped funcs will not be processed. -// A blocking function should therefore explicitly start a new goroutine. +// Invoking the wrapped Go function from JavaScript will +// pause the event loop and spawn a new goroutine. +// Other wrapped functions which are triggered during a call from Go to JavaScript +// get executed on the same goroutine. +// +// As a consequence, if one wrapped function blocks, JavaScript's event loop +// is blocked until that function returns. Hence, calling any async JavaScript +// API, which requires the event loop, like fetch (http.Client), will cause an +// immediate deadlock. Therefore a blocking function should explicitly start a +// new goroutine. // // Func.Release must be called to free up resources when the function will not be used any more. func FuncOf(fn func(this Value, args []Value) interface{}) Func { diff --git a/src/time/example_test.go b/src/time/example_test.go index fe8e042d69..f272ee44df 100644 --- a/src/time/example_test.go +++ b/src/time/example_test.go @@ -206,7 +206,7 @@ func ExampleNewTicker() { func ExampleTime_Format() { // Parse a time value from a string in the standard Unix format. - t, err := time.Parse(time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") + t, err := time.Parse(time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") if err != nil { // Always check errors even if they should not happen. panic(err) } @@ -252,8 +252,70 @@ func ExampleTime_Format() { fmt.Printf("\nFormats:\n\n") // Simple starter examples. 
- do("Basic full date", "Mon Jan 2 15:04:05 MST 2006", "Sat Mar 7 11:06:39 PST 2015") - do("Basic short date", "2006/01/02", "2015/03/07") + do("Basic full date", "Mon Jan 2 15:04:05 MST 2006", "Wed Feb 25 11:06:39 PST 2015") + do("Basic short date", "2006/01/02", "2015/02/25") + + // The hour of the reference time is 15, or 3PM. The layout can express + // it either way, and since our value is the morning we should see it as + // an AM time. We show both in one format string. Lower case too. + do("AM/PM", "3PM==3pm==15h", "11AM==11am==11h") + + // When parsing, if the seconds value is followed by a decimal point + // and some digits, that is taken as a fraction of a second even if + // the layout string does not represent the fractional second. + // Here we add a fractional second to our time value used above. + t, err = time.Parse(time.UnixDate, "Wed Feb 25 11:06:39.1234 PST 2015") + if err != nil { + panic(err) + } + // It does not appear in the output if the layout string does not contain + // a representation of the fractional second. + do("No fraction", time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") + + // Fractional seconds can be printed by adding a run of 0s or 9s after + // a decimal point in the seconds value in the layout string. + // If the layout digits are 0s, the fractional second is of the specified + // width. Note that the output has a trailing zero. + do("0s for fraction", "15:04:05.00000", "11:06:39.12340") + + // If the fraction in the layout is 9s, trailing zeros are dropped. 
+ do("9s for fraction", "15:04:05.99999999", "11:06:39.1234") + + // Output: + // default format: 2015-02-25 11:06:39 -0800 PST + // Unix format: Wed Feb 25 11:06:39 PST 2015 + // Same, in UTC: Wed Feb 25 19:06:39 UTC 2015 + // + // Formats: + // + // Basic full date "Mon Jan 2 15:04:05 MST 2006" gives "Wed Feb 25 11:06:39 PST 2015" + // Basic short date "2006/01/02" gives "2015/02/25" + // AM/PM "3PM==3pm==15h" gives "11AM==11am==11h" + // No fraction "Mon Jan _2 15:04:05 MST 2006" gives "Wed Feb 25 11:06:39 PST 2015" + // 0s for fraction "15:04:05.00000" gives "11:06:39.12340" + // 9s for fraction "15:04:05.99999999" gives "11:06:39.1234" + +} + +func ExampleTime_Format_pad() { + // Parse a time value from a string in the standard Unix format. + t, err := time.Parse(time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") + if err != nil { // Always check errors even if they should not happen. + panic(err) + } + + // Define a helper function to make the examples' output look nice. + do := func(name, layout, want string) { + got := t.Format(layout) + if want != got { + fmt.Printf("error: for %q got %q; expected %q\n", layout, got, want) + return + } + fmt.Printf("%-16s %q gives %q\n", name, layout, got) + } + + // The predefined constant Unix uses an underscore to pad the day. + do("Unix", time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") // For fixed-width printing of values, such as the date, that may be one or // two characters (7 vs. 07), use an _ instead of a space in the layout string. @@ -272,54 +334,12 @@ func ExampleTime_Format() { // so it doesn't need padding, but the minutes (04, 06) does. do("Suppressed pad", "04:05", "06:39") - // The predefined constant Unix uses an underscore to pad the day. - // Compare with our simple starter example. - do("Unix", time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") - - // The hour of the reference time is 15, or 3PM. The layout can express - // it either way, and since our value is the morning we should see it as - // an AM time. 
We show both in one format string. Lower case too. - do("AM/PM", "3PM==3pm==15h", "11AM==11am==11h") - - // When parsing, if the seconds value is followed by a decimal point - // and some digits, that is taken as a fraction of a second even if - // the layout string does not represent the fractional second. - // Here we add a fractional second to our time value used above. - t, err = time.Parse(time.UnixDate, "Sat Mar 7 11:06:39.1234 PST 2015") - if err != nil { - panic(err) - } - // It does not appear in the output if the layout string does not contain - // a representation of the fractional second. - do("No fraction", time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") - - // Fractional seconds can be printed by adding a run of 0s or 9s after - // a decimal point in the seconds value in the layout string. - // If the layout digits are 0s, the fractional second is of the specified - // width. Note that the output has a trailing zero. - do("0s for fraction", "15:04:05.00000", "11:06:39.12340") - - // If the fraction in the layout is 9s, trailing zeros are dropped. 
- do("9s for fraction", "15:04:05.99999999", "11:06:39.1234") - // Output: - // default format: 2015-03-07 11:06:39 -0800 PST - // Unix format: Sat Mar 7 11:06:39 PST 2015 - // Same, in UTC: Sat Mar 7 19:06:39 UTC 2015 - // - // Formats: - // - // Basic full date "Mon Jan 2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" - // Basic short date "2006/01/02" gives "2015/03/07" + // Unix "Mon Jan _2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" // No pad "<2>" gives "<7>" // Spaces "<_2>" gives "< 7>" // Zeros "<02>" gives "<07>" // Suppressed pad "04:05" gives "06:39" - // Unix "Mon Jan _2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" - // AM/PM "3PM==3pm==15h" gives "11AM==11am==11h" - // No fraction "Mon Jan _2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" - // 0s for fraction "15:04:05.00000" gives "11:06:39.12340" - // 9s for fraction "15:04:05.99999999" gives "11:06:39.1234" } diff --git a/src/time/time.go b/src/time/time.go index 5dc9fa68ac..5fa09687e9 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -535,58 +535,26 @@ func absWeekday(abs uint64) Weekday { // week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1 // of year n+1. func (t Time) ISOWeek() (year, week int) { - year, month, day, yday := t.date(true) - wday := int(t.Weekday()+6) % 7 // weekday but Monday = 0. - const ( - Mon int = iota - Tue - Wed - Thu - Fri - Sat - Sun - ) + // According to the rule that the first calendar week of a calendar year is + // the week including the first Thursday of that year, and that the last one is + // the week immediately preceding the first calendar week of the next calendar year. + // See https://www.iso.org/obp/ui#iso:std:iso:8601:-1:ed-1:v1:en:term:3.1.1.23 for details. - // Calculate week as number of Mondays in year up to - // and including today, plus 1 because the first week is week 0. 
- // Putting the + 1 inside the numerator as a + 7 keeps the - // numerator from being negative, which would cause it to - // round incorrectly. - week = (yday - wday + 7) / 7 - - // The week number is now correct under the assumption - // that the first Monday of the year is in week 1. - // If Jan 1 is a Tuesday, Wednesday, or Thursday, the first Monday - // is actually in week 2. - jan1wday := (wday - yday + 7*53) % 7 - if Tue <= jan1wday && jan1wday <= Thu { - week++ + // weeks start with Monday + // Monday Tuesday Wednesday Thursday Friday Saturday Sunday + // 1 2 3 4 5 6 7 + // +3 +2 +1 0 -1 -2 -3 + // the offset to Thursday + abs := t.abs() + d := Thursday - absWeekday(abs) + // handle Sunday + if d == 4 { + d = -3 } - - // If the week number is still 0, we're in early January but in - // the last week of last year. - if week == 0 { - year-- - week = 52 - // A year has 53 weeks when Jan 1 or Dec 31 is a Thursday, - // meaning Jan 1 of the next year is a Friday - // or it was a leap year and Jan 1 of the next year is a Saturday. - if jan1wday == Fri || (jan1wday == Sat && isLeap(year)) { - week++ - } - } - - // December 29 to 31 are in week 1 of next year if - // they are after the last Thursday of the year and - // December 31 is a Monday, Tuesday, or Wednesday. - if month == December && day >= 29 && wday < Thu { - if dec31wday := (wday + 31 - day) % 7; Mon <= dec31wday && dec31wday <= Wed { - year++ - week = 1 - } - } - - return + // find the Thursday of the calendar week + abs += uint64(d) * secondsPerDay + year, _, _, yday := absDate(abs, false) + return year, yday/7 + 1 } // Clock returns the hour, minute, and second within the day specified by t. 
diff --git a/src/time/time_test.go b/src/time/time_test.go index 2fc23c4fee..ffbf92acbc 100644 --- a/src/time/time_test.go +++ b/src/time/time_test.go @@ -1348,6 +1348,13 @@ func BenchmarkDay(b *testing.B) { } } +func BenchmarkISOWeek(b *testing.B) { + t := Now() + for i := 0; i < b.N; i++ { + _, _ = t.ISOWeek() + } +} + func TestMarshalBinaryZeroTime(t *testing.T) { t0 := Time{} enc, err := t0.MarshalBinary() diff --git a/test/codegen/README b/test/codegen/README index f6877e701d..d6b8cf5b32 100644 --- a/test/codegen/README +++ b/test/codegen/README @@ -125,7 +125,7 @@ As a general guideline, test functions should be small, to avoid possible interactions between unrelated lines of code that may be introduced, for example, by the compiler's optimization passes. -Any given line of Go code could get assigned more instructions that it +Any given line of Go code could get assigned more instructions than it may appear from reading the source. In particular, matching all MOV instructions should be avoided; the compiler may add them for unrelated reasons and this may render the test ineffective. diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go new file mode 100644 index 0000000000..79dd337dee --- /dev/null +++ b/test/codegen/fuse.go @@ -0,0 +1,197 @@ +// asmcheck + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package codegen + +// Notes: +// - these examples use channels to provide a source of +// unknown values that cannot be optimized away +// - these examples use for loops to force branches +// backward (predicted taken) + +// ---------------------------------- // +// signed integer range (conjunction) // +// ---------------------------------- // + +func si1c(c <-chan int64) { + // amd64:"CMPQ\t.+, [$]256" + // s390x:"CLGIJ\t[$]12, R[0-9]+, [$]255" + for x := <-c; x >= 0 && x < 256; x = <-c { + } +} + +func si2c(c <-chan int32) { + // amd64:"CMPL\t.+, [$]256" + // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255" + for x := <-c; x >= 0 && x < 256; x = <-c { + } +} + +func si3c(c <-chan int16) { + // amd64:"CMPW\t.+, [$]256" + // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255" + for x := <-c; x >= 0 && x < 256; x = <-c { + } +} + +func si4c(c <-chan int8) { + // amd64:"CMPB\t.+, [$]10" + // s390x:"CLIJ\t[$]4, R[0-9]+, [$]10" + for x := <-c; x >= 0 && x < 10; x = <-c { + } +} + +func si5c(c <-chan int64) { + // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," + // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]251","ADD\t[$]-5," + for x := <-c; x < 256 && x > 4; x = <-c { + } +} + +func si6c(c <-chan int32) { + // amd64:"CMPL\t.+, [$]255","DECL\t" + // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255","ADDW\t[$]-1," + for x := <-c; x > 0 && x <= 256; x = <-c { + } +} + +func si7c(c <-chan int16) { + // amd64:"CMPW\t.+, [$]60","ADDL\t[$]10," + // s390x:"CLIJ\t[$]12, R[0-9]+, [$]60","ADDW\t[$]10," + for x := <-c; x >= -10 && x <= 50; x = <-c { + } +} + +func si8c(c <-chan int8) { + // amd64:"CMPB\t.+, [$]126","ADDL\t[$]126," + // s390x:"CLIJ\t[$]4, R[0-9]+, [$]126","ADDW\t[$]126," + for x := <-c; x >= -126 && x < 0; x = <-c { + } +} + +// ---------------------------------- // +// signed integer range (disjunction) // +// ---------------------------------- // + +func si1d(c <-chan int64) { + // amd64:"CMPQ\t.+, [$]256" + // s390x:"CLGIJ\t[$]2, R[0-9]+, [$]255" + for x := <-c; x < 0 || x >= 256; x = <-c { + } +} + +func si2d(c 
<-chan int32) { + // amd64:"CMPL\t.+, [$]256" + // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255" + for x := <-c; x < 0 || x >= 256; x = <-c { + } +} + +func si3d(c <-chan int16) { + // amd64:"CMPW\t.+, [$]256" + // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255" + for x := <-c; x < 0 || x >= 256; x = <-c { + } +} + +func si4d(c <-chan int8) { + // amd64:"CMPB\t.+, [$]10" + // s390x:"CLIJ\t[$]10, R[0-9]+, [$]10" + for x := <-c; x < 0 || x >= 10; x = <-c { + } +} + +func si5d(c <-chan int64) { + // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," + // s390x:"CLGIJ\t[$]10, R[0-9]+, [$]251","ADD\t[$]-5," + for x := <-c; x >= 256 || x <= 4; x = <-c { + } +} + +func si6d(c <-chan int32) { + // amd64:"CMPL\t.+, [$]255","DECL\t" + // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255","ADDW\t[$]-1," + for x := <-c; x <= 0 || x > 256; x = <-c { + } +} + +func si7d(c <-chan int16) { + // amd64:"CMPW\t.+, [$]60","ADDL\t[$]10," + // s390x:"CLIJ\t[$]2, R[0-9]+, [$]60","ADDW\t[$]10," + for x := <-c; x < -10 || x > 50; x = <-c { + } +} + +func si8d(c <-chan int8) { + // amd64:"CMPB\t.+, [$]126","ADDL\t[$]126," + // s390x:"CLIJ\t[$]10, R[0-9]+, [$]126","ADDW\t[$]126," + for x := <-c; x < -126 || x >= 0; x = <-c { + } +} + +// ------------------------------------ // +// unsigned integer range (conjunction) // +// ------------------------------------ // + +func ui1c(c <-chan uint64) { + // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," + // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]251","ADD\t[$]-5," + for x := <-c; x < 256 && x > 4; x = <-c { + } +} + +func ui2c(c <-chan uint32) { + // amd64:"CMPL\t.+, [$]255","DECL\t" + // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255","ADDW\t[$]-1," + for x := <-c; x > 0 && x <= 256; x = <-c { + } +} + +func ui3c(c <-chan uint16) { + // amd64:"CMPW\t.+, [$]40","ADDL\t[$]-10," + // s390x:"CLIJ\t[$]12, R[0-9]+, [$]40","ADDW\t[$]-10," + for x := <-c; x >= 10 && x <= 50; x = <-c { + } +} + +func ui4c(c <-chan uint8) { + // amd64:"CMPB\t.+, [$]2","ADDL\t[$]-126," + // s390x:"CLIJ\t[$]4, R[0-9]+, [$]2","ADDW\t[$]-126," + for 
x := <-c; x >= 126 && x < 128; x = <-c { + } +} + +// ------------------------------------ // +// unsigned integer range (disjunction) // +// ------------------------------------ // + +func ui1d(c <-chan uint64) { + // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," + // s390x:"CLGIJ\t[$]10, R[0-9]+, [$]251","ADD\t[$]-5," + for x := <-c; x >= 256 || x <= 4; x = <-c { + } +} + +func ui2d(c <-chan uint32) { + // amd64:"CMPL\t.+, [$]254","ADDL\t[$]-2," + // s390x:"CLIJ\t[$]2, R[0-9]+, [$]254","ADDW\t[$]-2," + for x := <-c; x <= 1 || x > 256; x = <-c { + } +} + +func ui3d(c <-chan uint16) { + // amd64:"CMPW\t.+, [$]40","ADDL\t[$]-10," + // s390x:"CLIJ\t[$]2, R[0-9]+, [$]40","ADDW\t[$]-10," + for x := <-c; x < 10 || x > 50; x = <-c { + } +} + +func ui4d(c <-chan uint8) { + // amd64:"CMPB\t.+, [$]2","ADDL\t[$]-126," + // s390x:"CLIJ\t[$]10, R[0-9]+, [$]2","ADDW\t[$]-126," + for x := <-c; x < 126 || x >= 128; x = <-c { + } +} diff --git a/test/codegen/math.go b/test/codegen/math.go index 80e5d60d96..1ebfda0405 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -151,13 +151,13 @@ func toFloat32(u32 uint32) float32 { func constantCheck64() bool { // amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1" // s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1," - return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) || math.NaN() == math.NaN() + return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) } func constantCheck32() bool { // amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0" // s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0," - return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) && float32(math.NaN()) != float32(math.NaN()) + return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) } // Test that integer constants are converted to floating point constants @@ -186,3 +186,32 @@ func constantConvertInt32(x uint32) uint32 { } return x } + +func nanGenerate64() float64 { + // Test to make sure we 
don't generate a NaN while constant propagating. + // See issue 36400. + zero := 0.0 + // amd64:-"DIVSD" + inf := 1 / zero // +inf. We can constant propagate this one. + negone := -1.0 + + // amd64:"DIVSD" + z0 := zero / zero + // amd64:"MULSD" + z1 := zero * inf + // amd64:"SQRTSD" + z2 := math.Sqrt(negone) + return z0 + z1 + z2 +} + +func nanGenerate32() float32 { + zero := float32(0.0) + // amd64:-"DIVSS" + inf := 1 / zero // +inf. We can constant propagate this one. + + // amd64:"DIVSS" + z0 := zero / zero + // amd64:"MULSS" + z1 := zero * inf + return z0 + z1 +} diff --git a/test/codegen/smallintiface.go b/test/codegen/smallintiface.go new file mode 100644 index 0000000000..0207a0af79 --- /dev/null +++ b/test/codegen/smallintiface.go @@ -0,0 +1,22 @@ +// asmcheck + +package codegen + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +func booliface() interface{} { + // amd64:`LEAQ\truntime.staticuint64s\+8\(SB\)` + return true +} + +func smallint8iface() interface{} { + // amd64:`LEAQ\truntime.staticuint64s\+2024\(SB\)` + return int8(-3) +} + +func smalluint8iface() interface{} { + // amd64:`LEAQ\truntime.staticuint64s\+24\(SB\)` + return uint8(3) +} diff --git a/test/fixedbugs/bug169.go b/test/fixedbugs/bug169.go index f63c2f3e1a..62ab7c2fa1 100644 --- a/test/fixedbugs/bug169.go +++ b/test/fixedbugs/bug169.go @@ -5,6 +5,6 @@ // license that can be found in the LICENSE file. 
package main -var x = '''; // ERROR "char" +var x = '''; // ERROR "char|rune" diff --git a/test/fixedbugs/issue15611.go b/test/fixedbugs/issue15611.go index 6a627d9b5e..3634475418 100644 --- a/test/fixedbugs/issue15611.go +++ b/test/fixedbugs/issue15611.go @@ -8,13 +8,13 @@ package p // These error messages are for the invalid literals on lines 19 and 20: -// ERROR "newline in character literal" -// ERROR "invalid character literal \(missing closing '\)" +// ERROR "newline in character literal|newline in rune literal" +// ERROR "invalid character literal \(missing closing '\)|rune literal not terminated" const ( - _ = '' // ERROR "empty character literal or unescaped ' in character literal" + _ = '' // ERROR "empty character literal or unescaped ' in character literal|empty rune literal" _ = 'f' - _ = 'foo' // ERROR "invalid character literal \(more than one character\)" + _ = 'foo' // ERROR "invalid character literal \(more than one character\)|more than one character in rune literal" //line issue15611.go:11 _ = ' _ = ' \ No newline at end of file diff --git a/test/fixedbugs/issue32133.go b/test/fixedbugs/issue32133.go index 13e4658a0f..f3cca87a72 100644 --- a/test/fixedbugs/issue32133.go +++ b/test/fixedbugs/issue32133.go @@ -8,7 +8,7 @@ package p // errors for the //line-adjusted code below // ERROR "newline in string" -// ERROR "newline in character literal" +// ERROR "newline in character literal|newline in rune literal" // ERROR "newline in string" // ERROR "string not terminated" diff --git a/test/fixedbugs/issue37513.dir/main.go b/test/fixedbugs/issue37513.dir/main.go new file mode 100644 index 0000000000..75106521b6 --- /dev/null +++ b/test/fixedbugs/issue37513.dir/main.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "os" + "os/exec" +) + +func main() { + if len(os.Args) > 1 { + // Generate a SIGILL. + sigill() + return + } + // Run ourselves with an extra argument. That process should SIGILL. + out, _ := exec.Command(os.Args[0], "foo").CombinedOutput() + want := "instruction bytes: 0xf 0xb 0xc3" + if !bytes.Contains(out, []byte(want)) { + fmt.Printf("got:\n%s\nwant:\n%s\n", string(out), want) + } +} +func sigill() diff --git a/test/fixedbugs/issue37513.dir/sigill_amd64.s b/test/fixedbugs/issue37513.dir/sigill_amd64.s new file mode 100644 index 0000000000..43260c21ae --- /dev/null +++ b/test/fixedbugs/issue37513.dir/sigill_amd64.s @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT ·sigill(SB),0,$0-0 + UD2 // generates a SIGILL + RET diff --git a/test/fixedbugs/issue37513.go b/test/fixedbugs/issue37513.go new file mode 100644 index 0000000000..e05b2d861f --- /dev/null +++ b/test/fixedbugs/issue37513.go @@ -0,0 +1,9 @@ +// buildrundir + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,amd64 darwin,amd64 linux,386 + +package ignored