From 618126b9895db7f29a861caa4e330d149858ff56 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 28 Feb 2020 15:03:54 -0500 Subject: [PATCH 01/69] cmd/go: avoid matching wildcards rooted outside of available modules To avoid confusion, also distinguish between packages and dirs in search.Match results. No test because this is technically only a performance optimization: it would be very difficult to write such a test so that it would not be flaky. (However, tested the change manually.) Fixes #37521 Change-Id: I17b443699ce6a8f3a63805a7ef0be806f695a4b3 Reviewed-on: https://go-review.googlesource.com/c/go/+/221544 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod --- src/cmd/go/internal/get/get.go | 5 +- src/cmd/go/internal/modload/load.go | 60 ++++++++++++------- src/cmd/go/internal/search/search.go | 64 ++++++++++++--------- src/cmd/go/testdata/script/mod_list_std.txt | 10 ++++ 4 files changed, 90 insertions(+), 49 deletions(-) diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index b048eafa74..f7b2fa96e8 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -286,11 +286,12 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) if wildcardOkay && strings.Contains(arg, "...") { match := search.NewMatch(arg) if match.IsLocal() { - match.MatchPackagesInFS() + match.MatchDirs() + args = match.Dirs } else { match.MatchPackages() + args = match.Pkgs } - args = match.Pkgs for _, err := range match.Errs { base.Errorf("%s", err) } diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 32841d96cb..6ea7d8c69b 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -65,24 +65,13 @@ func ImportPaths(patterns []string) []*search.Match { // packages. The build tags should typically be imports.Tags() or // imports.AnyTags(); a nil map has no special meaning. 
func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match { - var fsDirs [][]string updateMatches := func(matches []*search.Match, iterating bool) { - for i, m := range matches { + for _, m := range matches { switch { case m.IsLocal(): // Evaluate list of file system directories on first iteration. - if fsDirs == nil { - fsDirs = make([][]string, len(matches)) - } - if fsDirs[i] == nil { - if m.IsLiteral() { - fsDirs[i] = []string{m.Pattern()} - } else { - m.MatchPackagesInFS() - // Pull out the matching directories: we are going to resolve them - // to package paths below. - fsDirs[i], m.Pkgs = m.Pkgs, nil - } + if m.Dirs == nil { + matchLocalDirs(m) } // Make a copy of the directory list and translate to import paths. @@ -91,10 +80,9 @@ func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match { // from not being in the build list to being in it and back as // the exact version of a particular module increases during // the loader iterations. - pkgs := str.StringList(fsDirs[i]) - m.Pkgs = pkgs[:0] - for _, pkg := range pkgs { - pkg, err := resolveLocalPackage(pkg) + m.Pkgs = m.Pkgs[:0] + for _, dir := range m.Dirs { + pkg, err := resolveLocalPackage(dir) if err != nil { if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { continue // Don't include "builtin" or GOROOT/src in wildcard patterns. @@ -131,7 +119,7 @@ func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match { } case m.Pattern() == "std" || m.Pattern() == "cmd": - if len(m.Pkgs) == 0 { + if m.Pkgs == nil { m.MatchPackages() // Locate the packages within GOROOT/src. } @@ -186,6 +174,34 @@ func checkMultiplePaths() { base.ExitIfErrors() } +// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories +// outside of the standard library and active modules. 
+func matchLocalDirs(m *search.Match) { + if !m.IsLocal() { + panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) + } + + if i := strings.Index(m.Pattern(), "..."); i >= 0 { + // The pattern is local, but it is a wildcard. Its packages will + // only resolve to paths if they are inside of the standard + // library, the main module, or some dependency of the main + // module. Verify that before we walk the filesystem: a filesystem + // walk in a directory like /var or /etc can be very expensive! + dir := filepath.Dir(filepath.Clean(m.Pattern()[:i+3])) + absDir := dir + if !filepath.IsAbs(dir) { + absDir = filepath.Join(base.Cwd, dir) + } + if search.InDir(absDir, cfg.GOROOTsrc) == "" && search.InDir(absDir, ModRoot()) == "" && pathInModuleCache(absDir) == "" { + m.Dirs = []string{} + m.AddError(fmt.Errorf("directory prefix %s outside available modules", base.ShortPath(absDir))) + return + } + } + + m.MatchDirs() +} + // resolveLocalPackage resolves a filesystem path to a package path. func resolveLocalPackage(dir string) (string, error) { var absDir string @@ -269,7 +285,11 @@ func resolveLocalPackage(dir string) (string, error) { } if sub := search.InDir(absDir, cfg.GOROOTsrc); sub != "" && sub != "." && !strings.Contains(sub, "@") { - return filepath.ToSlash(sub), nil + pkg := filepath.ToSlash(sub) + if pkg == "builtin" { + return "", errPkgIsBuiltin + } + return pkg, nil } pkg := pathInModuleCache(absDir) diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go index 69d0e2d16f..b588c3e467 100644 --- a/src/cmd/go/internal/search/search.go +++ b/src/cmd/go/internal/search/search.go @@ -19,7 +19,8 @@ import ( // A Match represents the result of matching a single package pattern. 
type Match struct { pattern string // the pattern itself - Pkgs []string // matching packages (dirs or import paths) + Dirs []string // if the pattern is local, directories that potentially contain matching packages + Pkgs []string // matching packages (import paths) Errs []error // errors matching the patterns to packages, NOT errors loading those packages // Errs may be non-empty even if len(Pkgs) > 0, indicating that some matching @@ -84,20 +85,25 @@ func (e *MatchError) Unwrap() error { return e.Err } -// MatchPackages sets m.Pkgs to contain all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". +// MatchPackages sets m.Pkgs to a non-nil slice containing all the packages that +// can be found under the $GOPATH directories and $GOROOT that match the +// pattern. The pattern must be either "all" (all packages), "std" (standard +// packages), "cmd" (standard commands), or a path including "...". // -// MatchPackages sets m.Errs to contain any errors encountered while processing -// the match. +// If any errors may have caused the set of packages to be incomplete, +// MatchPackages appends those errors to m.Errs. func (m *Match) MatchPackages() { - m.Pkgs, m.Errs = nil, nil + m.Pkgs = []string{} if m.IsLocal() { m.AddError(fmt.Errorf("internal error: MatchPackages: %s is not a valid package pattern", m.pattern)) return } + if m.IsLiteral() { + m.Pkgs = []string{m.pattern} + return + } + match := func(string) bool { return true } treeCanMatch := func(string) bool { return true } if !m.IsMeta() { @@ -197,16 +203,22 @@ func SetModRoot(dir string) { modRoot = dir } -// MatchPackagesInFS is like MatchPackages but is passed a pattern that -// begins with an absolute path or "./" or "../". On Windows, the pattern may -// use slash or backslash separators or a mix of both. 
+// MatchDirs sets m.Dirs to a non-nil slice containing all directories that +// potentially match a local pattern. The pattern must begin with an absolute +// path, or "./", or "../". On Windows, the pattern may use slash or backslash +// separators or a mix of both. // -// MatchPackagesInFS scans the tree rooted at the directory that contains the -// first "..." wildcard. -func (m *Match) MatchPackagesInFS() { - m.Pkgs, m.Errs = nil, nil +// If any errors may have caused the set of directories to be incomplete, +// MatchDirs appends those errors to m.Errs. +func (m *Match) MatchDirs() { + m.Dirs = []string{} if !m.IsLocal() { - m.AddError(fmt.Errorf("internal error: MatchPackagesInFS: %s is not a valid filesystem pattern", m.pattern)) + m.AddError(fmt.Errorf("internal error: MatchDirs: %s is not a valid filesystem pattern", m.pattern)) + return + } + + if m.IsLiteral() { + m.Dirs = []string{m.pattern} return } @@ -301,7 +313,7 @@ func (m *Match) MatchPackagesInFS() { // which is all that Match promises to do. // Ignore the import error. } - m.Pkgs = append(m.Pkgs, name) + m.Dirs = append(m.Dirs, name) return nil }) if err != nil { @@ -416,25 +428,23 @@ func ImportPathsQuiet(patterns []string) []*Match { for _, a := range CleanPatterns(patterns) { m := NewMatch(a) if m.IsLocal() { - if m.IsLiteral() { - m.Pkgs = []string{a} - } else { - m.MatchPackagesInFS() - } + m.MatchDirs() // Change the file import path to a regular import path if the package // is in GOPATH or GOROOT. We don't report errors here; LoadImport // (or something similar) will report them later. - for i, dir := range m.Pkgs { + m.Pkgs = make([]string, len(m.Dirs)) + for i, dir := range m.Dirs { + absDir := dir if !filepath.IsAbs(dir) { - dir = filepath.Join(base.Cwd, dir) + absDir = filepath.Join(base.Cwd, dir) } - if bp, _ := cfg.BuildContext.ImportDir(dir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." 
{ + if bp, _ := cfg.BuildContext.ImportDir(absDir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." { m.Pkgs[i] = bp.ImportPath + } else { + m.Pkgs[i] = dir } } - } else if m.IsLiteral() { - m.Pkgs = []string{a} } else { m.MatchPackages() } diff --git a/src/cmd/go/testdata/script/mod_list_std.txt b/src/cmd/go/testdata/script/mod_list_std.txt index 8552aebf42..76a3b00d1c 100644 --- a/src/cmd/go/testdata/script/mod_list_std.txt +++ b/src/cmd/go/testdata/script/mod_list_std.txt @@ -14,6 +14,16 @@ go list cmd/... stdout ^cmd/compile ! stdout ^cmd/vendor/golang\.org/x/arch/x86/x86asm +# GOROOT/src/... should list the packages in std as if it were a module +# dependency: omitting vendored dependencies and stopping at the 'cmd' module +# boundary. + +go list $GOROOT/src/... +stdout ^bytes$ +! stdout ^builtin$ +! stdout ^cmd/ +! stdout ^vendor/ + # Within the std module, listing ./... should omit the 'std' prefix: # the package paths should be the same via ./... or the 'std' meta-pattern. 
From e48a83f077e47bd015d4b57e63e9b6fb5e77dc8b Mon Sep 17 00:00:00 2001 From: Meng Zhuo Date: Fri, 28 Feb 2020 20:14:18 +0800 Subject: [PATCH 02/69] internal/cpu: add MIPS64x feature detection Change-Id: Iacdad1758aa15e4703fccef38c08ecb338b95fd7 Reviewed-on: https://go-review.googlesource.com/c/go/+/200579 Run-TryBot: Meng Zhuo TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/internal/cpu/cpu.go | 8 ++++++++ src/internal/cpu/cpu_mips64.go | 7 ------- src/internal/cpu/cpu_mips64le.go | 7 ------- src/internal/cpu/cpu_mips64x.go | 32 ++++++++++++++++++++++++++++++++ src/internal/cpu/cpu_no_init.go | 2 ++ src/runtime/os_linux_mips64x.go | 6 ++++++ 6 files changed, 48 insertions(+), 14 deletions(-) delete mode 100644 src/internal/cpu/cpu_mips64.go delete mode 100644 src/internal/cpu/cpu_mips64le.go create mode 100644 src/internal/cpu/cpu_mips64x.go diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index f326b06332..84df6472eb 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -134,6 +134,14 @@ type s390x struct { _ CacheLinePad } +var MIPS64X mips64x + +type mips64x struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + // Initialize examines the processor and sets the relevant variables above. // This is called by the runtime package early in program initialization, // before normal init functions are run. env is set by runtime if the OS supports diff --git a/src/internal/cpu/cpu_mips64.go b/src/internal/cpu/cpu_mips64.go deleted file mode 100644 index 0f821e44e7..0000000000 --- a/src/internal/cpu/cpu_mips64.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mips64le.go b/src/internal/cpu/cpu_mips64le.go deleted file mode 100644 index 0f821e44e7..0000000000 --- a/src/internal/cpu/cpu_mips64le.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mips64x.go b/src/internal/cpu/cpu_mips64x.go new file mode 100644 index 0000000000..9b0a824ee8 --- /dev/null +++ b/src/internal/cpu/cpu_mips64x.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const CacheLinePadSize = 32 + +// These are initialized by archauxv in runtime/os_linux_mips64x.go. +// These should not be changed after they are initialized. +var HWCap uint + +// HWCAP bits. These are exposed by the Linux kernel 5.4. 
+const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } + + // HWCAP feature bits + MIPS64X.HasMSA = isSet(HWCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/src/internal/cpu/cpu_no_init.go b/src/internal/cpu/cpu_no_init.go index d4b2be8cf4..fb381e1ce2 100644 --- a/src/internal/cpu/cpu_no_init.go +++ b/src/internal/cpu/cpu_no_init.go @@ -9,6 +9,8 @@ // +build !ppc64 // +build !ppc64le // +build !s390x +// +build !mips64 +// +build !mips64le package cpu diff --git a/src/runtime/os_linux_mips64x.go b/src/runtime/os_linux_mips64x.go index 464a26a8a4..4ff66f9538 100644 --- a/src/runtime/os_linux_mips64x.go +++ b/src/runtime/os_linux_mips64x.go @@ -7,7 +7,13 @@ package runtime +import "internal/cpu" + func archauxv(tag, val uintptr) { + switch tag { + case _AT_HWCAP: + cpu.HWCap = uint(val) + } } func osArchInit() {} From 5756808ce8eb6f6af99aa8d3e5a125ce7d1b8544 Mon Sep 17 00:00:00 2001 From: martin Date: Thu, 12 Dec 2019 16:03:04 -0800 Subject: [PATCH 03/69] runtime: do not exit(2) if a Go built DLL receives a signal Fixes #35965 Change-Id: I172501fc0b29595e59b058f6e30f31efe5f6d1f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/211139 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Alex Brainman Reviewed-by: Emmanuel Odeke --- src/runtime/os_windows.go | 6 +- src/runtime/signal_windows_test.go | 87 +++++++++++++++++++ .../testdata/testwinlibsignal/dummy.go | 10 +++ src/runtime/testdata/testwinlibsignal/main.c | 50 +++++++++++ 4 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 src/runtime/testdata/testwinlibsignal/dummy.go create mode 100644 src/runtime/testdata/testwinlibsignal/main.c diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index bddc25729a..7576565599 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -1031,7 +1031,11 @@ func 
ctrlhandler1(_type uint32) uint32 { if sigsend(s) { return 1 } - exit(2) // SIGINT, SIGTERM, etc + if !islibrary && !isarchive { + // Only exit the program if we don't have a DLL. + // See https://golang.org/issues/35965. + exit(2) // SIGINT, SIGTERM, etc + } return 0 } diff --git a/src/runtime/signal_windows_test.go b/src/runtime/signal_windows_test.go index 9748403412..423516df65 100644 --- a/src/runtime/signal_windows_test.go +++ b/src/runtime/signal_windows_test.go @@ -3,6 +3,8 @@ package runtime_test import ( + "bufio" + "bytes" "internal/testenv" "io/ioutil" "os" @@ -10,6 +12,7 @@ import ( "path/filepath" "runtime" "strings" + "syscall" "testing" ) @@ -59,3 +62,87 @@ func TestVectoredHandlerDontCrashOnLibrary(t *testing.T) { t.Errorf("expected output %q, got %q", expectedOutput, cleanedOut) } } + +func sendCtrlBreak(t *testing.T, pid int) { + kernel32, err := syscall.LoadDLL("kernel32.dll") + if err != nil { + t.Fatalf("LoadDLL: %v\n", err) + } + generateEvent, err := kernel32.FindProc("GenerateConsoleCtrlEvent") + if err != nil { + t.Fatalf("FindProc: %v\n", err) + } + result, _, err := generateEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(pid)) + if result == 0 { + t.Fatalf("GenerateConsoleCtrlEvent: %v\n", err) + } +} + +// TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events. +// See https://golang.org/issues/35965. 
+func TestLibraryCtrlHandler(t *testing.T) { + if *flagQuick { + t.Skip("-quick") + } + if runtime.GOARCH != "amd64" { + t.Skip("this test can only run on windows/amd64") + } + testenv.MustHaveGoBuild(t) + testenv.MustHaveExecPath(t, "gcc") + testprog.Lock() + defer testprog.Unlock() + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + // build go dll + dll := filepath.Join(dir, "dummy.dll") + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dll, "--buildmode", "c-shared", "testdata/testwinlibsignal/dummy.go") + out, err := testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("failed to build go library: %s\n%s", err, out) + } + + // build c program + exe := filepath.Join(dir, "test.exe") + cmd = exec.Command("gcc", "-o", exe, "testdata/testwinlibsignal/main.c") + out, err = testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("failed to build c exe: %s\n%s", err, out) + } + + // run test program + cmd = exec.Command(exe) + var stderr bytes.Buffer + cmd.Stderr = &stderr + outPipe, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("Failed to create stdout pipe: %v", err) + } + outReader := bufio.NewReader(outPipe) + + cmd.SysProcAttr = &syscall.SysProcAttr{ + CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, + } + if err := cmd.Start(); err != nil { + t.Fatalf("Start failed: %v", err) + } + + sentCtrl := make(chan bool) + go func() { + defer close(sentCtrl) + if line, err := outReader.ReadString('\n'); err != nil { + t.Fatalf("Could not read stdout: %v", err) + } else if strings.TrimSpace(line) != "ready" { + t.Fatalf("Unexpected message: %v", line) + } + sendCtrlBreak(t, cmd.Process.Pid) + }() + + <-sentCtrl + if err := cmd.Wait(); err != nil { + t.Fatalf("Program exited with error: %v\n%s", err, &stderr) + } +} diff --git a/src/runtime/testdata/testwinlibsignal/dummy.go b/src/runtime/testdata/testwinlibsignal/dummy.go 
new file mode 100644 index 0000000000..82dfd91c93 --- /dev/null +++ b/src/runtime/testdata/testwinlibsignal/dummy.go @@ -0,0 +1,10 @@ +// +build windows + +package main + +//export Dummy +func Dummy() int { + return 42 +} + +func main() {} diff --git a/src/runtime/testdata/testwinlibsignal/main.c b/src/runtime/testdata/testwinlibsignal/main.c new file mode 100644 index 0000000000..1787fef3b9 --- /dev/null +++ b/src/runtime/testdata/testwinlibsignal/main.c @@ -0,0 +1,50 @@ +#include +#include + +HANDLE waitForCtrlBreakEvent; + +BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) +{ + switch (fdwCtrlType) + { + case CTRL_BREAK_EVENT: + SetEvent(waitForCtrlBreakEvent); + return TRUE; + default: + return FALSE; + } +} + +int main(void) +{ + waitForCtrlBreakEvent = CreateEvent(NULL, TRUE, FALSE, NULL); + if (!waitForCtrlBreakEvent) { + fprintf(stderr, "ERROR: Could not create event"); + return 1; + } + + if (!SetConsoleCtrlHandler(CtrlHandler, TRUE)) + { + fprintf(stderr, "ERROR: Could not set control handler"); + return 1; + } + + // The library must be loaded after the SetConsoleCtrlHandler call + // so that the library handler registers after the main program. + // This way the library handler gets called first. + HMODULE dummyDll = LoadLibrary("dummy.dll"); + if (!dummyDll) { + fprintf(stderr, "ERROR: Could not load dummy.dll"); + return 1; + } + + printf("ready\n"); + fflush(stdout); + + if (WaitForSingleObject(waitForCtrlBreakEvent, 5000) != WAIT_OBJECT_0) { + fprintf(stderr, "FAILURE: No signal received"); + return 1; + } + + return 0; +} From d889f0cb1090a043786157f059c29269065626f4 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Fri, 28 Feb 2020 17:04:16 -0800 Subject: [PATCH 04/69] cmd/compile: use correct types in phiopt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We try to preserve type correctness of generic ops. phiopt modified a bool to be an int without a conversion. Add a conversion. 
There are a few random fluctations in the generated code as a result, but nothing noteworthy or systematic. no binary size changes file before after Δ % math.s 35966 35961 -5 -0.014% debug/dwarf.s 108141 108147 +6 +0.006% crypto/dsa.s 6047 6044 -3 -0.050% image/png.s 42882 42885 +3 +0.007% go/parser.s 80281 80278 -3 -0.004% cmd/internal/obj.s 115116 115113 -3 -0.003% go/types.s 322130 322118 -12 -0.004% cmd/internal/obj/arm64.s 151679 151685 +6 +0.004% go/internal/gccgoimporter.s 56487 56493 +6 +0.011% cmd/test2json.s 1650 1647 -3 -0.182% cmd/link/internal/loadelf.s 35442 35443 +1 +0.003% cmd/go/internal/work.s 305039 305035 -4 -0.001% cmd/link/internal/ld.s 544835 544834 -1 -0.000% net/http.s 558777 558774 -3 -0.001% cmd/compile/internal/ssa.s 3926551 3926994 +443 +0.011% cmd/compile/internal/gc.s 1552320 1552321 +1 +0.000% total 18862241 18862670 +429 +0.002% Change-Id: I4289e773be6be534ea3f907d68f614441b8f9b46 Reviewed-on: https://go-review.googlesource.com/c/go/+/221607 Run-TryBot: Josh Bleecher Snyder Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/386.rules | 2 ++ src/cmd/compile/internal/ssa/gen/AMD64.rules | 2 ++ src/cmd/compile/internal/ssa/gen/ARM.rules | 2 ++ src/cmd/compile/internal/ssa/gen/ARM64.rules | 2 ++ src/cmd/compile/internal/ssa/gen/MIPS.rules | 2 ++ src/cmd/compile/internal/ssa/gen/MIPS64.rules | 2 ++ src/cmd/compile/internal/ssa/gen/PPC64.rules | 2 ++ src/cmd/compile/internal/ssa/gen/RISCV64.rules | 2 ++ src/cmd/compile/internal/ssa/gen/S390X.rules | 2 ++ src/cmd/compile/internal/ssa/gen/Wasm.rules | 2 ++ src/cmd/compile/internal/ssa/gen/genericOps.go | 1 + src/cmd/compile/internal/ssa/opGen.go | 6 ++++++ src/cmd/compile/internal/ssa/phiopt.go | 14 ++++++++------ src/cmd/compile/internal/ssa/rewrite386.go | 3 +++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 3 +++ src/cmd/compile/internal/ssa/rewriteARM.go | 3 +++ src/cmd/compile/internal/ssa/rewriteARM64.go | 3 +++ src/cmd/compile/internal/ssa/rewriteMIPS.go | 3 +++ 
src/cmd/compile/internal/ssa/rewriteMIPS64.go | 3 +++ src/cmd/compile/internal/ssa/rewritePPC64.go | 3 +++ src/cmd/compile/internal/ssa/rewriteRISCV64.go | 3 +++ src/cmd/compile/internal/ssa/rewriteS390X.go | 3 +++ src/cmd/compile/internal/ssa/rewriteWasm.go | 3 +++ 23 files changed, 65 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index 78916bebc3..64a6cbaf84 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -92,6 +92,8 @@ (Round32F ...) -> (Copy ...) (Round64F ...) -> (Copy ...) +(CvtBoolToUint8 ...) -> (Copy ...) + // Lowering shifts // Unsigned shifts need to return 0 if shift amount is >= width of shifted value. // result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 4fd13a5056..f915ea4355 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -154,6 +154,8 @@ (Round(32|64)F ...) -> (Copy ...) +(CvtBoolToUint8 ...) -> (Copy ...) + // Lowering shifts // Unsigned shifts need to return 0 if shift amount is >= width of shifted value. // result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 77e7b477c6..839d701b8c 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -207,6 +207,8 @@ (Round(32|64)F ...) -> (Copy ...) +(CvtBoolToUint8 ...) -> (Copy ...) 
+ // fused-multiply-add (FMA x y z) -> (FMULAD z x y) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 4e0ab3288d..61994a15a1 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -244,6 +244,8 @@ (Cvt32Fto64F ...) -> (FCVTSD ...) (Cvt64Fto32F ...) -> (FCVTDS ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round32F ...) -> (LoweredRound32F ...) (Round64F ...) -> (LoweredRound64F ...) diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules index 228d5ee454..9ac8e5f471 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules @@ -170,6 +170,8 @@ (Cvt32Fto64F ...) -> (MOVFD ...) (Cvt64Fto32F ...) -> (MOVDF ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round(32|64)F ...) -> (Copy ...) // comparisons diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules index 35c65023cd..6df2b3e6b9 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules @@ -171,6 +171,8 @@ (Cvt32Fto64F ...) -> (MOVFD ...) (Cvt64Fto32F ...) -> (MOVDF ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round(32|64)F ...) -> (Copy ...) // comparisons diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index d4ef49e20b..961f833e90 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -59,6 +59,8 @@ (Cvt32Fto64F ...) -> (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64 (Cvt64Fto32F ...) -> (FRSP ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round(32|64)F ...) -> (LoweredRound(32|64)F ...) (Sqrt ...) -> (FSQRT ...) 
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules index 720724647e..4ab4656bd5 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules @@ -131,6 +131,8 @@ (Cvt32Fto64F ...) -> (FCVTDS ...) (Cvt64Fto32F ...) -> (FCVTSD ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round32F ...) -> (Copy ...) (Round64F ...) -> (Copy ...) diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index 30a0249759..f2c7f62dcf 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -230,6 +230,8 @@ (Cvt32Fto64F ...) -> (LDEBR ...) (Cvt64Fto32F ...) -> (LEDBR ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round(32|64)F ...) -> (LoweredRound(32|64)F ...) // Lowering shifts diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules index 010adcb095..cdcbc28c30 100644 --- a/src/cmd/compile/internal/ssa/gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules @@ -91,6 +91,8 @@ (Cvt32Fto64F ...) -> (F64PromoteF32 ...) (Cvt64Fto32F ...) -> (F32DemoteF64 ...) +(CvtBoolToUint8 ...) -> (Copy ...) + (Round32F ...) -> (Copy ...) (Round64F ...) -> (Copy ...) diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 9f17299610..54c6968c5b 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -418,6 +418,7 @@ var genericOps = []opData{ {name: "Cvt64Fto64", argLength: 1}, {name: "Cvt32Fto64F", argLength: 1}, {name: "Cvt64Fto32F", argLength: 1}, + {name: "CvtBoolToUint8", argLength: 1}, // Force rounding to precision of type. 
{name: "Round32F", argLength: 1}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1111316d9b..963f1aa07a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2584,6 +2584,7 @@ const ( OpCvt64Fto64 OpCvt32Fto64F OpCvt64Fto32F + OpCvtBoolToUint8 OpRound32F OpRound64F OpIsNonNil @@ -32561,6 +32562,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CvtBoolToUint8", + argLen: 1, + generic: true, + }, { name: "Round32F", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go index cc3319e188..8643fa584c 100644 --- a/src/cmd/compile/internal/ssa/phiopt.go +++ b/src/cmd/compile/internal/ssa/phiopt.go @@ -148,6 +148,13 @@ func phioptint(v *Value, b0 *Block, reverse int) { negate = !negate } + a := b0.Controls[0] + if negate { + a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a) + } + v.AddArg(a) + + cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, a.Type, a) switch v.Type.Size() { case 1: v.reset(OpCopy) @@ -160,12 +167,7 @@ func phioptint(v *Value, b0 *Block, reverse int) { default: v.Fatalf("bad int size %d", v.Type.Size()) } - - a := b0.Controls[0] - if negate { - a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a) - } - v.AddArg(a) + v.AddArg(cvt) f := b0.Func if f.pass.debug > 0 { diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index cf9a7362a2..4efb2d4f74 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -408,6 +408,9 @@ func rewriteValue386(v *Value) bool { case OpCvt64Fto32F: v.Op = Op386CVTSD2SS return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: v.Op = Op386DIVW return true diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 665b20c42d..ee7f9ad190 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -780,6 +780,9 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt64to64F: v.Op = OpAMD64CVTSQ2SD return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv128u: v.Op = OpAMD64DIVQU2 return true diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index baa3c66e0f..6849fecc2a 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -545,6 +545,9 @@ func rewriteValueARM(v *Value) bool { case OpCvt64Fto32U: v.Op = OpARMMOVDWU return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueARM_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 4bee98e4d3..51051b93b7 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -604,6 +604,9 @@ func rewriteValueARM64(v *Value) bool { case OpCvt64to64F: v.Op = OpARM64SCVTFD return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueARM64_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index f4d774c96f..b3226cddb5 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -129,6 +129,9 @@ func rewriteValueMIPS(v *Value) bool { case OpCvt64Fto32F: v.Op = OpMIPSMOVDF return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueMIPS_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 4139361b11..315270b16a 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -154,6 +154,9 @@ func rewriteValueMIPS64(v *Value) bool { case OpCvt64to64F: 
v.Op = OpMIPS64MOVVD return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueMIPS64_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index c4eb25f38e..53549dda74 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -179,6 +179,9 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpCvt64to32F(v) case OpCvt64to64F: return rewriteValuePPC64_OpCvt64to64F(v) + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValuePPC64_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index e4480dc366..4d70814cfd 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -114,6 +114,9 @@ func rewriteValueRISCV64(v *Value) bool { case OpCvt64to64F: v.Op = OpRISCV64FCVTDL return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueRISCV64_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 43ca2ceb92..2195f6aa1e 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -183,6 +183,9 @@ func rewriteValueS390X(v *Value) bool { case OpCvt64to64F: v.Op = OpS390XCDGBRA return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueS390X_OpDiv16(v) case OpDiv16u: diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index ea365f46b6..90701067ce 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -169,6 +169,9 @@ func rewriteValueWasm(v *Value) bool { case OpCvt64to64F: v.Op = OpWasmF64ConvertI64S return true 
+ case OpCvtBoolToUint8: + v.Op = OpCopy + return true case OpDiv16: return rewriteValueWasm_OpDiv16(v) case OpDiv16u: From 74f898360d2ea74d885544473cc60943771b36d4 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sat, 29 Feb 2020 07:07:56 -0800 Subject: [PATCH 05/69] cmd/compile: constant fold SSA bool to int conversions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Shaves off a few instructions here and there. file before after Δ % go/types.s 322118 321851 -267 -0.083% go/internal/gcimporter.s 34937 34909 -28 -0.080% go/internal/gccgoimporter.s 56493 56474 -19 -0.034% cmd/compile/internal/ssa.s 3926994 3927177 +183 +0.005% total 18862670 18862539 -131 -0.001% Change-Id: I724f32317b946b5138224808f85709d9c097a247 Reviewed-on: https://go-review.googlesource.com/c/go/+/221428 Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/generic.rules | 1 + src/cmd/compile/internal/ssa/rewritegeneric.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index f4d487176b..54c5ed646f 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -56,6 +56,7 @@ (Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(auxTo64F(c))]) (Round32F x:(Const32F)) -> x (Round64F x:(Const64F)) -> x +(CvtBoolToUint8 (ConstBool [c])) -> (Const8 [c]) (Trunc16to8 (ZeroExt8to16 x)) -> x (Trunc32to8 (ZeroExt8to32 x)) -> x diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index c711af249c..94c2353fd9 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -68,6 +68,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpCvt64to32F(v) case OpCvt64to64F: return rewriteValuegeneric_OpCvt64to64F(v) + case OpCvtBoolToUint8: + return 
rewriteValuegeneric_OpCvtBoolToUint8(v) case OpDiv16: return rewriteValuegeneric_OpDiv16(v) case OpDiv16u: @@ -2981,6 +2983,21 @@ func rewriteValuegeneric_OpCvt64to64F(v *Value) bool { } return false } +func rewriteValuegeneric_OpCvtBoolToUint8(v *Value) bool { + v_0 := v.Args[0] + // match: (CvtBoolToUint8 (ConstBool [c])) + // result: (Const8 [c]) + for { + if v_0.Op != OpConstBool { + break + } + c := v_0.AuxInt + v.reset(OpConst8) + v.AuxInt = c + return true + } + return false +} func rewriteValuegeneric_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] From ba0e651130cfe0a8fc8ab2845b2f7c6ae44db1e1 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Thu, 27 Feb 2020 18:56:28 -0800 Subject: [PATCH 06/69] cmd/compile: add more amd64 constant simplifications More minor optimization opportunities from CL 220499. Change-Id: Ic4f34c41ed8ab0fce227ac194731c1be12c602db Reviewed-on: https://go-review.googlesource.com/c/go/+/221608 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 17 +++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 136 +++++++++++++++++++ 2 files changed, 153 insertions(+) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index f915ea4355..c165fed485 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -758,6 +758,7 @@ (MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x) (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x) +(ORQ x (MOVLconst [c])) -> (ORQconst [c] x) (ORL x (MOVLconst [c])) -> (ORLconst [c] x) (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x) @@ -1307,6 +1308,15 @@ (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x) (FlagGT_ULT) (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT) +// CMPQconst requires a 32 bit const, but we can still 
constant-fold 64 bit consts. +// In theory this applies to any of the simplifications above, +// but CMPQ is the only one I've actually seen occur. +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y -> (FlagEQ) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x (FlagLT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && xuint64(y) -> (FlagLT_UGT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x) (FlagGT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT) + // Other known comparisons. (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT) (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT) @@ -1480,6 +1490,12 @@ (BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1< (MOVLconst [d^(1< (MOVQconst [c|d]) + // generic simplifications // TODO: more of this (ADDQ x (NEGQ y)) -> (SUBQ x y) @@ -1495,6 +1511,7 @@ (SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)]) (SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)]) +(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)]) // Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range. 
(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index ee7f9ad190..c0329c1528 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -8136,6 +8136,96 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { v.AddArg(v0) return true } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x==y + // result: (FlagEQ) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + break + } + y := v_1.AuxInt + if !(x == y) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: xuint64(y) + // result: (FlagLT_UGT) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + break + } + y := v_1.AuxInt + if !(x < y && uint64(x) > uint64(y)) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x>y && uint64(x) y && uint64(x) < uint64(y)) { + break + } + v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x>y && uint64(x)>uint64(y) + // result: (FlagGT_UGT) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + break + } + y := v_1.AuxInt + if !(x > y && uint64(x) > uint64(y)) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x) // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPQload {sym} [off] ptr x mem) @@ -24138,6 +24228,22 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } break } + // match: (ORQ x (MOVLconst [c])) + // result: (ORQconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := 
v_1.AuxInt + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + break + } // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) // cond: d==64-c // result: (ROLQconst x [c]) @@ -24366,6 +24472,24 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } break } + // match: (ORQ (MOVQconst [c]) (MOVQconst [d])) + // result: (MOVQconst [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0.AuxInt + if v_1.Op != OpAMD64MOVQconst { + continue + } + d := v_1.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = c | d + return true + } + break + } // match: (ORQ x x) // result: x for { @@ -31787,6 +31911,18 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { v.AuxInt = c << uint64(d) return true } + // match: (SHLQconst [d] (MOVLconst [c])) + // result: (MOVQconst [int64(int32(c)) << uint64(d)]) + for { + d := v.AuxInt + if v_0.Op != OpAMD64MOVLconst { + break + } + c := v_0.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64(int32(c)) << uint64(d) + return true + } return false } func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool { From 2962c96c9f24b685c86133cdd7612026827278d8 Mon Sep 17 00:00:00 2001 From: "Ruixin(Peter) Bao" Date: Tue, 26 Nov 2019 10:52:43 -0500 Subject: [PATCH 07/69] cmd/compile: lower float to uint conversions on s390x Add rules for lowering float <-> unsigned int on s390x. During compilation, Cvt64Uto64F rule triggers around 80 times, Cvt64Fto64U rule triggers around 20 times, Cvt64Uto32F rule triggers around 5 times. 
Change-Id: If4c9d128b9132fce8c0bea9abc09cb43a5df7989 Reviewed-on: https://go-review.googlesource.com/c/go/+/209177 Reviewed-by: Michael Munday Run-TryBot: Michael Munday TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/s390x/ssa.go | 2 + src/cmd/compile/internal/ssa/gen/S390X.rules | 12 ++ src/cmd/compile/internal/ssa/gen/S390XOps.go | 13 ++- src/cmd/compile/internal/ssa/opGen.go | 112 +++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteS390X.go | 24 ++++ 6 files changed, 162 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index a710f81dc5..a89af236f4 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2203,7 +2203,7 @@ func (s *state) expr(n *Node) *ssa.Value { conv = conv1 } } - if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat { + if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat { if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 } diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index f1725bdda4..2de3ef4b35 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -498,6 +498,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ssa.OpS390XLDGR, ssa.OpS390XLGDR, ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA, ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA, + ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR, + ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR, ssa.OpS390XLDEBR, ssa.OpS390XLEDBR, ssa.OpS390XFNEG, ssa.OpS390XFNEGS, ssa.OpS390XLPDFR, ssa.OpS390XLNDFR: diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules 
b/src/cmd/compile/internal/ssa/gen/S390X.rules index f2c7f62dcf..5cff8df3a4 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -227,6 +227,18 @@ (Cvt64Fto32 ...) -> (CFDBRA ...) (Cvt64Fto64 ...) -> (CGDBRA ...) +// Lowering float <-> uint +(Cvt32Uto32F ...) -> (CELFBR ...) +(Cvt32Uto64F ...) -> (CDLFBR ...) +(Cvt64Uto32F ...) -> (CELGBR ...) +(Cvt64Uto64F ...) -> (CDLGBR ...) + +(Cvt32Fto32U ...) -> (CLFEBR ...) +(Cvt32Fto64U ...) -> (CLGEBR ...) +(Cvt64Fto32U ...) -> (CLFDBR ...) +(Cvt64Fto64U ...) -> (CLGDBR ...) + +// Lowering float32 <-> float64 (Cvt32Fto64F ...) -> (LDEBR ...) (Cvt64Fto32F ...) -> (LEDBR ...) diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index 6517957fd4..819046d30c 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -401,8 +401,17 @@ func init() { {name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA"}, // convert int32 to float64 {name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA"}, // convert int64 to float32 {name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA"}, // convert int64 to float64 - {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32 - {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64 + {name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR"}, // convert float32 to uint32 + {name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR"}, // convert float64 to uint32 + {name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR"}, // convert float32 to uint64 + {name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR"}, // convert float64 to uint64 + {name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR"}, // convert uint32 to float32 + {name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR"}, // convert uint32 to float64 + {name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR"}, // convert 
uint64 to float32 + {name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR"}, // convert uint64 to float64 + + {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32 + {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64 {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"}, // arg0 + arg1 + auxint + aux diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 963f1aa07a..d9d38a8b80 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2113,6 +2113,14 @@ const ( OpS390XCDFBRA OpS390XCEGBRA OpS390XCDGBRA + OpS390XCLFEBR + OpS390XCLFDBR + OpS390XCLGEBR + OpS390XCLGDBR + OpS390XCELFBR + OpS390XCDLFBR + OpS390XCELGBR + OpS390XCDLGBR OpS390XLEDBR OpS390XLDEBR OpS390XMOVDaddr @@ -28314,6 +28322,110 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CLFEBR", + argLen: 1, + asm: s390x.ACLFEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLFDBR", + argLen: 1, + asm: s390x.ACLFDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGEBR", + argLen: 1, + asm: s390x.ACLGEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGDBR", + argLen: 1, + asm: s390x.ACLGDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CELFBR", + argLen: 1, + asm: s390x.ACELFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLFBR", + argLen: 1, + asm: s390x.ACDLFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CELGBR", + argLen: 1, + asm: s390x.ACELGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLGBR", + argLen: 1, + asm: s390x.ACDLGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "LEDBR", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 2195f6aa1e..4fc90128d0 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -156,12 +156,24 @@ func rewriteValueS390X(v *Value) bool { case OpCvt32Fto32: v.Op = OpS390XCFEBRA return true + case OpCvt32Fto32U: + v.Op = OpS390XCLFEBR + return true case OpCvt32Fto64: v.Op = OpS390XCGEBRA return true case OpCvt32Fto64F: v.Op = OpS390XLDEBR return true + case OpCvt32Fto64U: + v.Op = OpS390XCLGEBR + return true + case OpCvt32Uto32F: + v.Op = OpS390XCELFBR + return true + case OpCvt32Uto64F: + v.Op = OpS390XCDLFBR + return 
true case OpCvt32to32F: v.Op = OpS390XCEFBRA return true @@ -174,9 +186,21 @@ func rewriteValueS390X(v *Value) bool { case OpCvt64Fto32F: v.Op = OpS390XLEDBR return true + case OpCvt64Fto32U: + v.Op = OpS390XCLFDBR + return true case OpCvt64Fto64: v.Op = OpS390XCGDBRA return true + case OpCvt64Fto64U: + v.Op = OpS390XCLGDBR + return true + case OpCvt64Uto32F: + v.Op = OpS390XCELGBR + return true + case OpCvt64Uto64F: + v.Op = OpS390XCDLGBR + return true case OpCvt64to32F: v.Op = OpS390XCEGBRA return true From 33e98326a25d54cef19e94ca73c45eaed8847f56 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 26 Feb 2020 18:34:25 -0800 Subject: [PATCH 08/69] net/textproto: pass missing argument to fmt.Sprintf The vet tool didn't catch this because the fmt.Sprintf format argument was written as an expression. Fixes #37467 Change-Id: I72c20ba45e3f42c195fa5e68adcdb9837c7d7ad5 Reviewed-on: https://go-review.googlesource.com/c/go/+/221297 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/net/textproto/reader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index a505da985c..d26e981ae4 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -557,7 +557,7 @@ func noValidation(_ []byte) error { return nil } // contain a colon. 
func mustHaveFieldNameColon(line []byte) error { if bytes.IndexByte(line, ':') < 0 { - return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q" + string(line))) + return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q", line)) } return nil } From c1abd5ab70e1e8ac59a2960d41fe6d83d68ea69b Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sat, 29 Feb 2020 02:32:21 -0800 Subject: [PATCH 09/69] runtime: don't invoke t.Fatal* in goroutine in TestLibraryCtrlHandler Change-Id: I8bb06c360cab3e5a74b0b0f98bb25cca4741d66d Reviewed-on: https://go-review.googlesource.com/c/go/+/221605 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/runtime/signal_windows_test.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/runtime/signal_windows_test.go b/src/runtime/signal_windows_test.go index 423516df65..f99857193c 100644 --- a/src/runtime/signal_windows_test.go +++ b/src/runtime/signal_windows_test.go @@ -5,6 +5,7 @@ package runtime_test import ( "bufio" "bytes" + "fmt" "internal/testenv" "io/ioutil" "os" @@ -63,19 +64,20 @@ func TestVectoredHandlerDontCrashOnLibrary(t *testing.T) { } } -func sendCtrlBreak(t *testing.T, pid int) { +func sendCtrlBreak(pid int) error { kernel32, err := syscall.LoadDLL("kernel32.dll") if err != nil { - t.Fatalf("LoadDLL: %v\n", err) + return fmt.Errorf("LoadDLL: %v\n", err) } generateEvent, err := kernel32.FindProc("GenerateConsoleCtrlEvent") if err != nil { - t.Fatalf("FindProc: %v\n", err) + return fmt.Errorf("FindProc: %v\n", err) } result, _, err := generateEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(pid)) if result == 0 { - t.Fatalf("GenerateConsoleCtrlEvent: %v\n", err) + return fmt.Errorf("GenerateConsoleCtrlEvent: %v\n", err) } + return nil } // TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events. 
@@ -130,18 +132,20 @@ func TestLibraryCtrlHandler(t *testing.T) { t.Fatalf("Start failed: %v", err) } - sentCtrl := make(chan bool) + errCh := make(chan error, 1) go func() { - defer close(sentCtrl) if line, err := outReader.ReadString('\n'); err != nil { - t.Fatalf("Could not read stdout: %v", err) + errCh <- fmt.Errorf("could not read stdout: %v", err) } else if strings.TrimSpace(line) != "ready" { - t.Fatalf("Unexpected message: %v", line) + errCh <- fmt.Errorf("unexpected message: %v", line) + } else { + errCh <- sendCtrlBreak(cmd.Process.Pid) } - sendCtrlBreak(t, cmd.Process.Pid) }() - <-sentCtrl + if err := <-errCh; err != nil { + t.Fatal(err) + } if err := cmd.Wait(); err != nil { t.Fatalf("Program exited with error: %v\n%s", err, &stderr) } From 2172b229b95f483324825806f692303a0a132762 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Thu, 27 Feb 2020 11:24:24 -0800 Subject: [PATCH 10/69] runtime/pprof/internal/profile: make error message readable The error message for an unrecognized type in decodeField was using string(i) for an int type i. It was recently changed (by me) to string(rune(i)), but that just avoided a vet warning without fixing the problem. This CL fixes the problem by using fmt.Errorf. We also change the message to "unknown wire type" to match the master copy of this code in github.com/google/pprof/profile/proto.go. 
Updates #32479 Change-Id: Ia91ea6d5edbd7cd946225d1ee96bb7623b52bb44 Reviewed-on: https://go-review.googlesource.com/c/go/+/221384 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/runtime/pprof/internal/profile/proto.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/runtime/pprof/internal/profile/proto.go b/src/runtime/pprof/internal/profile/proto.go index 294acfeb92..52cf1ef2b3 100644 --- a/src/runtime/pprof/internal/profile/proto.go +++ b/src/runtime/pprof/internal/profile/proto.go @@ -21,7 +21,10 @@ package profile -import "errors" +import ( + "errors" + "fmt" +) type buffer struct { field int @@ -232,7 +235,7 @@ func decodeField(b *buffer, data []byte) ([]byte, error) { b.u64 = uint64(le32(data[:4])) data = data[4:] default: - return nil, errors.New("unknown type: " + string(rune(b.typ))) + return nil, fmt.Errorf("unknown wire type: %d", b.typ) } return data, nil From 91bc75b4870308b668d497ff22eada75219c3c2e Mon Sep 17 00:00:00 2001 From: Shuo Date: Sun, 1 Mar 2020 02:32:32 +0000 Subject: [PATCH 11/69] time: optimize Time.ISOWeek MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit name old time/op new time/op delta ISOWeek-4 57.7ns ± 5% 27.9ns ±10% -51.54% (p=0.000 n=48+49) Fixes #37534 Change-Id: Ic4673ced44a4b0190018e87207743ed9500fb1e0 GitHub-Last-Rev: a376c57e83a99f8e8fde297335caa85215e7aead GitHub-Pull-Request: golang/go#36316 Reviewed-on: https://go-review.googlesource.com/c/go/+/212837 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/time/time.go | 68 ++++++++++++------------------------------- src/time/time_test.go | 7 +++++ 2 files changed, 25 insertions(+), 50 deletions(-) diff --git a/src/time/time.go b/src/time/time.go index 5dc9fa68ac..5fa09687e9 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -535,58 +535,26 @@ func absWeekday(abs uint64) Weekday { // week 52 or 53 of year n-1, and Dec 
29 to Dec 31 might belong to week 1 // of year n+1. func (t Time) ISOWeek() (year, week int) { - year, month, day, yday := t.date(true) - wday := int(t.Weekday()+6) % 7 // weekday but Monday = 0. - const ( - Mon int = iota - Tue - Wed - Thu - Fri - Sat - Sun - ) + // According to the rule that the first calendar week of a calendar year is + // the week including the first Thursday of that year, and that the last one is + // the week immediately preceding the first calendar week of the next calendar year. + // See https://www.iso.org/obp/ui#iso:std:iso:8601:-1:ed-1:v1:en:term:3.1.1.23 for details. - // Calculate week as number of Mondays in year up to - // and including today, plus 1 because the first week is week 0. - // Putting the + 1 inside the numerator as a + 7 keeps the - // numerator from being negative, which would cause it to - // round incorrectly. - week = (yday - wday + 7) / 7 - - // The week number is now correct under the assumption - // that the first Monday of the year is in week 1. - // If Jan 1 is a Tuesday, Wednesday, or Thursday, the first Monday - // is actually in week 2. - jan1wday := (wday - yday + 7*53) % 7 - if Tue <= jan1wday && jan1wday <= Thu { - week++ + // weeks start with Monday + // Monday Tuesday Wednesday Thursday Friday Saturday Sunday + // 1 2 3 4 5 6 7 + // +3 +2 +1 0 -1 -2 -3 + // the offset to Thursday + abs := t.abs() + d := Thursday - absWeekday(abs) + // handle Sunday + if d == 4 { + d = -3 } - - // If the week number is still 0, we're in early January but in - // the last week of last year. - if week == 0 { - year-- - week = 52 - // A year has 53 weeks when Jan 1 or Dec 31 is a Thursday, - // meaning Jan 1 of the next year is a Friday - // or it was a leap year and Jan 1 of the next year is a Saturday. 
- if jan1wday == Fri || (jan1wday == Sat && isLeap(year)) { - week++ - } - } - - // December 29 to 31 are in week 1 of next year if - // they are after the last Thursday of the year and - // December 31 is a Monday, Tuesday, or Wednesday. - if month == December && day >= 29 && wday < Thu { - if dec31wday := (wday + 31 - day) % 7; Mon <= dec31wday && dec31wday <= Wed { - year++ - week = 1 - } - } - - return + // find the Thursday of the calendar week + abs += uint64(d) * secondsPerDay + year, _, _, yday := absDate(abs, false) + return year, yday/7 + 1 } // Clock returns the hour, minute, and second within the day specified by t. diff --git a/src/time/time_test.go b/src/time/time_test.go index 2fc23c4fee..ffbf92acbc 100644 --- a/src/time/time_test.go +++ b/src/time/time_test.go @@ -1348,6 +1348,13 @@ func BenchmarkDay(b *testing.B) { } } +func BenchmarkISOWeek(b *testing.B) { + t := Now() + for i := 0; i < b.N; i++ { + _, _ = t.ISOWeek() + } +} + func TestMarshalBinaryZeroTime(t *testing.T) { t0 := Time{} enc, err := t0.MarshalBinary() From 95f382139043059a2a0780ba577b53893408f7e4 Mon Sep 17 00:00:00 2001 From: Alex Brainman Date: Tue, 25 Feb 2020 18:42:24 +1100 Subject: [PATCH 12/69] cmd/go, cmd/link: implement -buildmode=pie on windows MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This CL implements windows version of -buildmode=pie code in both cmd/go and cmd/link. Windows executables built with -buildmode=pie set (unlike the one built with -buildmode=exe) will have extra .reloc PE section, and will have no IMAGE_FILE_RELOCS_STRIPPED flag set. They will also have IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag set, and IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag set for windows/amd64. Both cgo and non-cgo versions are implemented. And TestBuildmodePIE is extended to test both cgo and non-cgo versions on windows and linux. This CL used some code from CLs 152759 and 203602. 
RELNOTE=yes Fixes #27144 Updates #35192 Change-Id: I1249e4ffbd79bd4277efefb56db321c390c0f76f Reviewed-on: https://go-review.googlesource.com/c/go/+/214397 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/dist/test.go | 4 ++ src/cmd/go/go_test.go | 57 +++++++++++++++++++++++-- src/cmd/go/internal/work/init.go | 8 +++- src/cmd/go/testdata/script/version.txt | 6 +-- src/cmd/internal/sys/supported.go | 3 +- src/cmd/link/internal/ld/config.go | 3 +- src/cmd/link/internal/ld/lib.go | 16 ++++++- src/cmd/link/internal/ld/pe.go | 58 +++++++++++++++++--------- 8 files changed, 122 insertions(+), 33 deletions(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index ca617e917e..48c36a63fc 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -941,6 +941,8 @@ func (t *tester) internalLinkPIE() bool { case "linux-amd64", "linux-arm64", "android-arm64": return true + case "windows-amd64", "windows-386", "windows-arm": + return true } return false } @@ -997,6 +999,8 @@ func (t *tester) supportedBuildmode(mode string) bool { return true case "darwin-amd64": return true + case "windows-amd64", "windows-386", "windows-arm": + return true } return false diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 4d5136deea..6654bd3143 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -9,6 +9,7 @@ import ( "context" "debug/elf" "debug/macho" + "debug/pe" "flag" "fmt" "go/format" @@ -2146,19 +2147,37 @@ func TestBuildmodePIE(t *testing.T) { switch platform { case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", "android/amd64", "android/arm", "android/arm64", "android/386", - "freebsd/amd64": + "freebsd/amd64", + "windows/386", "windows/amd64", "windows/arm": case "darwin/amd64": default: t.Skipf("skipping test because buildmode=pie is not supported on %s", platform) } + t.Run("non-cgo", func(t *testing.T) { + testBuildmodePIE(t, false) + }) + if canCgo { + 
switch runtime.GOOS { + case "darwin", "freebsd", "linux", "windows": + t.Run("cgo", func(t *testing.T) { + testBuildmodePIE(t, true) + }) + } + } +} +func testBuildmodePIE(t *testing.T, useCgo bool) { tg := testgo(t) defer tg.cleanup() tg.parallel() - tg.tempFile("main.go", `package main; func main() { print("hello") }`) + var s string + if useCgo { + s = `import "C";` + } + tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s)) src := tg.path("main.go") - obj := tg.path("main") + obj := tg.path("main.exe") tg.run("build", "-buildmode=pie", "-o", obj, src) switch runtime.GOOS { @@ -2183,6 +2202,38 @@ func TestBuildmodePIE(t *testing.T) { if f.Flags&macho.FlagPIE == 0 { t.Error("PIE must have PIE flag, but not") } + case "windows": + f, err := pe.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + const ( + IMAGE_FILE_RELOCS_STRIPPED = 0x0001 + IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020 + IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 + ) + if f.Section(".reloc") == nil { + t.Error(".reloc section is not present") + } + if (f.FileHeader.Characteristics & IMAGE_FILE_RELOCS_STRIPPED) != 0 { + t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set") + } + var dc uint16 + switch oh := f.OptionalHeader.(type) { + case *pe.OptionalHeader32: + dc = oh.DllCharacteristics + case *pe.OptionalHeader64: + dc = oh.DllCharacteristics + if (dc & IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set") + } + default: + t.Fatalf("unexpected optional header type of %T", f.OptionalHeader) + } + if (dc & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") + } default: panic("unreachable") } diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index 9091f98636..e970272954 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -161,8 +161,12 @@ func 
buildModeInit() { } if gccgo { codegenArg = "-fPIE" - } else if cfg.Goos != "aix" { - codegenArg = "-shared" + } else { + switch cfg.Goos { + case "aix", "windows": + default: + codegenArg = "-shared" + } } ldBuildmode = "pie" case "shared": diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt index 0ed1194840..0123ac6d53 100644 --- a/src/cmd/go/testdata/script/version.txt +++ b/src/cmd/go/testdata/script/version.txt @@ -22,8 +22,6 @@ stdout '^\tpath\trsc.io/fortune' stdout '^\tmod\trsc.io/fortune\tv1.0.0' # Repeat the test with -buildmode=pie. -# TODO(golang.org/issue/27144): don't skip after -buildmode=pie is implemented -# on Windows. [!buildmode:pie] stop go build -buildmode=pie -o external.exe rsc.io/fortune go version external.exe @@ -33,8 +31,8 @@ stdout '^\tpath\trsc.io/fortune' stdout '^\tmod\trsc.io/fortune\tv1.0.0' # Also test PIE with internal linking. -# currently only supported on linux/amd64 and linux/arm64. -[!linux] stop +# currently only supported on linux/amd64, linux/arm64 and windows/amd64. 
+[!linux] [!windows] stop [!amd64] [!arm64] stop go build -buildmode=pie -ldflags=-linkmode=internal -o internal.exe rsc.io/fortune go version internal.exe diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index c8ab2181b5..639827be86 100644 --- a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -87,7 +87,8 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { "android/amd64", "android/arm", "android/arm64", "android/386", "freebsd/amd64", "darwin/amd64", - "aix/ppc64": + "aix/ppc64", + "windows/386", "windows/amd64", "windows/arm": return true } return false diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go index 0eba4dc162..2373b500e3 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -38,7 +38,7 @@ func (mode *BuildMode) Set(s string) error { *mode = BuildModeExe case "pie": switch objabi.GOOS { - case "aix", "android", "linux": + case "aix", "android", "linux", "windows": case "darwin", "freebsd": switch objabi.GOARCH { case "amd64": @@ -209,6 +209,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { case BuildModePIE: switch objabi.GOOS + "/" + objabi.GOARCH { case "linux/amd64", "linux/arm64", "android/arm64": + case "windows/386", "windows/amd64", "windows/arm": default: // Internal linking does not support TLS_IE. return true, "buildmode=pie" diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 7c5877bfbd..a4b4b60ca1 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1258,8 +1258,20 @@ func (ctxt *Link) hostlink() { } } case BuildModePIE: - // ELF. - if ctxt.HeadType != objabi.Hdarwin && ctxt.HeadType != objabi.Haix { + switch ctxt.HeadType { + case objabi.Hdarwin, objabi.Haix: + case objabi.Hwindows: + // Enable ASLR. + argv = append(argv, "-Wl,--dynamicbase") + // enable high-entropy ASLR on 64-bit. 
+ if ctxt.Arch.PtrSize >= 8 { + argv = append(argv, "-Wl,--high-entropy-va") + } + // Work around binutils limitation that strips relocation table for dynamicbase. + // See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 + argv = append(argv, "-Wl,--export-all-symbols") + default: + // ELF. if ctxt.UseRelro() { argv = append(argv, "-Wl,-z,relro") } diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 4ab346e733..2c6be2d6f3 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -94,6 +94,7 @@ const ( IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14 IMAGE_SUBSYSTEM_WINDOWS_GUI = 2 IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 + IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020 IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000 @@ -126,6 +127,7 @@ const ( IMAGE_REL_ARM_SECREL = 0x000F IMAGE_REL_BASED_HIGHLOW = 3 + IMAGE_REL_BASED_DIR64 = 10 ) const ( @@ -752,12 +754,12 @@ func (f *peFile) writeSymbolTableAndStringTable(ctxt *Link) { } // writeFileHeader writes COFF file header for peFile f. -func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) { +func (f *peFile) writeFileHeader(ctxt *Link) { var fh pe.FileHeader - switch arch.Family { + switch ctxt.Arch.Family { default: - Exitf("unknown PE architecture: %v", arch.Family) + Exitf("unknown PE architecture: %v", ctxt.Arch.Family) case sys.AMD64: fh.Machine = IMAGE_FILE_MACHINE_AMD64 case sys.I386: @@ -772,16 +774,15 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) // much more beneficial than having build timestamp in the header. 
fh.TimeDateStamp = 0 - if linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED } else { - switch arch.Family { - default: - Exitf("write COFF(ext): unknown PE architecture: %v", arch.Family) + fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + switch ctxt.Arch.Family { case sys.AMD64, sys.I386: - fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED - case sys.ARM: - fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + if ctxt.BuildMode != BuildModePIE { + fh.Characteristics |= IMAGE_FILE_RELOCS_STRIPPED + } } } if pe64 != 0 { @@ -797,7 +798,7 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) fh.PointerToSymbolTable = uint32(f.symtabOffset) fh.NumberOfSymbols = uint32(f.symbolCount) - binary.Write(out, binary.LittleEndian, &fh) + binary.Write(ctxt.Out, binary.LittleEndian, &fh) } // writeOptionalHeader writes COFF optional header for peFile f. @@ -859,12 +860,6 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) { oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI } - switch ctxt.Arch.Family { - case sys.ARM: - oh64.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE - oh.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE - } - // Mark as having awareness of terminal services, to avoid ancient compatibility hacks. oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE @@ -873,6 +868,23 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) { oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT + // The DLL can be relocated at load time. 
+ switch ctxt.Arch.Family { + case sys.AMD64, sys.I386: + if ctxt.BuildMode == BuildModePIE { + oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + } + case sys.ARM: + oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + } + + // Image can handle a high entropy 64-bit virtual address space. + if ctxt.BuildMode == BuildModePIE { + oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA + } + // Disable stack growth as we don't want Windows to // fiddle with the thread stack limits, which we set // ourselves to circumvent the stack checks in the @@ -997,7 +1009,7 @@ func pewrite(ctxt *Link) { ctxt.Out.WriteStringN("PE", 4) } - pefile.writeFileHeader(ctxt.Arch, ctxt.Out, ctxt.LinkMode) + pefile.writeFileHeader(ctxt) pefile.writeOptionalHeader(ctxt) @@ -1376,6 +1388,8 @@ func (rt *peBaseRelocTable) addentry(ctxt *Link, s *sym.Symbol, r *sym.Reloc) { Exitf("unsupported relocation size %d\n", r.Siz) case 4: e.typeOff |= uint16(IMAGE_REL_BASED_HIGHLOW << 12) + case 8: + e.typeOff |= uint16(IMAGE_REL_BASED_DIR64 << 12) } b.entries = append(b.entries, e) @@ -1430,11 +1444,15 @@ func addPEBaseRelocSym(ctxt *Link, s *sym.Symbol, rt *peBaseRelocTable) { } func addPEBaseReloc(ctxt *Link) { - // We only generate base relocation table for ARM (and ... ARM64), x86, and AMD64 are marked as legacy - // archs and can use fixed base with no base relocation information + // Arm does not work without base relocation table. + // 386 and amd64 will only require the table for BuildModePIE. 
switch ctxt.Arch.Family { default: return + case sys.I386, sys.AMD64: + if ctxt.BuildMode != BuildModePIE { + return + } case sys.ARM: } From 1e9665da8fd8e2e095eb0e99a3b83118f600dc0b Mon Sep 17 00:00:00 2001 From: Bradford Lamson-Scribner Date: Thu, 20 Feb 2020 09:07:48 -0700 Subject: [PATCH 13/69] cmd/compile: add a dark mode to ssa html generation which can be toggled add a tag that when clicked, toggles a dark mode. It keeps intact the grayed out dead values/blocks, all the highlight colors, and ensures text is always readable. Fixes #34325 Change-Id: I4af1e4b5f4a5b63e54c992e90f8474cc51c63465 Reviewed-on: https://go-review.googlesource.com/c/go/+/220260 Reviewed-by: Josh Bleecher Snyder Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/html.go | 95 ++++++++++++++++++++++++---- 1 file changed, 81 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index 1e76a673ef..c384817d0c 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -93,7 +93,7 @@ td > h2 { td.collapsed { font-size: 12px; width: 12px; - border: 0px; + border: 1px solid white; padding: 0; cursor: pointer; background: #fafafa; @@ -247,18 +247,61 @@ svg { outline: 1px solid #eee; } -.highlight-aquamarine { background-color: aquamarine; } -.highlight-coral { background-color: coral; } -.highlight-lightpink { background-color: lightpink; } -.highlight-lightsteelblue { background-color: lightsteelblue; } -.highlight-palegreen { background-color: palegreen; } -.highlight-skyblue { background-color: skyblue; } -.highlight-lightgray { background-color: lightgray; } -.highlight-yellow { background-color: yellow; } -.highlight-lime { background-color: lime; } -.highlight-khaki { background-color: khaki; } -.highlight-aqua { background-color: aqua; } -.highlight-salmon { background-color: salmon; } +body.darkmode { + background-color: rgb(21, 21, 21); + color: 
rgb(230, 255, 255); + opacity: 100%; +} + +td.darkmode { + background-color: rgb(21, 21, 21); + border: 1px solid gray; +} + +body.darkmode table, th { + border: 1px solid gray; +} + +.highlight-aquamarine { background-color: aquamarine; color: black; } +.highlight-coral { background-color: coral; color: black; } +.highlight-lightpink { background-color: lightpink; color: black; } +.highlight-lightsteelblue { background-color: lightsteelblue; color: black; } +.highlight-palegreen { background-color: palegreen; color: black; } +.highlight-skyblue { background-color: skyblue; color: black; } +.highlight-lightgray { background-color: lightgray; color: black; } +.highlight-yellow { background-color: yellow; color: black; } +.highlight-lime { background-color: lime; color: black; } +.highlight-khaki { background-color: khaki; color: black; } +.highlight-aqua { background-color: aqua; color: black; } +.highlight-salmon { background-color: salmon; color: black; } + +/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */ +.dead-value span.highlight-aquamarine, +.dead-block.highlight-aquamarine, +.dead-value span.highlight-coral, +.dead-block.highlight-coral, +.dead-value span.highlight-lightpink, +.dead-block.highlight-lightpink, +.dead-value span.highlight-lightsteelblue, +.dead-block.highlight-lightsteelblue, +.dead-value span.highlight-palegreen, +.dead-block.highlight-palegreen, +.dead-value span.highlight-skyblue, +.dead-block.highlight-skyblue, +.dead-value span.highlight-lightgray, +.dead-block.highlight-lightgray, +.dead-value span.highlight-yellow, +.dead-block.highlight-yellow, +.dead-value span.highlight-lime, +.dead-block.highlight-lime, +.dead-value span.highlight-khaki, +.dead-block.highlight-khaki, +.dead-value span.highlight-aqua, +.dead-block.highlight-aqua, +.dead-value span.highlight-salmon, +.dead-block.highlight-salmon { + color: gray; +} .outline-blue { outline: blue solid 2px; } .outline-red { outline: red 
solid 2px; } @@ -284,6 +327,10 @@ ellipse.outline-teal { stroke-width: 2px; stroke: teal; } ellipse.outline-maroon { stroke-width: 2px; stroke: maroon; } ellipse.outline-black { stroke-width: 2px; stroke: black; } +/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */ +body.darkmode .outline-black { outline: gray solid 2px; } +body.darkmode ellipse.outline-black { outline: gray solid 2px; } + +} + +function toggleDarkMode() { + document.body.classList.toggle('darkmode'); + + const collapsedEls = document.getElementsByClassName('collapsed'); + const len = collapsedEls.length; + + for (let i = 0; i < len; i++) { + collapsedEls[i].classList.toggle('darkmode'); + } +} + + `) w.WriteString("") @@ -616,6 +681,8 @@ Edge with a dot means that this edge follows the order in which blocks were laid

+ + `) w.WriteString("") w.WriteString("") From a908e09a34fff3c47efab69857d27bf6965b948a Mon Sep 17 00:00:00 2001 From: Mark Pulford Date: Fri, 14 Feb 2020 08:34:31 +1100 Subject: [PATCH 14/69] runtime: deflake CGO traceback tests The CGO traceback function is called whenever CGO code is executing and a signal is received. This occurs much more frequently now SIGURG is used for preemption. Disable signal preemption to significantly increase the likelihood that a signal results in a profile sample during the test. Updates #37201 Change-Id: Icb1a33ab0754d1a74882a4ee265b4026abe30bdc Reviewed-on: https://go-review.googlesource.com/c/go/+/219417 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/runtime/crash_cgo_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go index 56cfb0856e..a09ecd8e42 100644 --- a/src/runtime/crash_cgo_test.go +++ b/src/runtime/crash_cgo_test.go @@ -275,7 +275,13 @@ func testCgoPprof(t *testing.T, buildArg, runArg, top, bottom string) { t.Fatal(err) } - got, err := testenv.CleanCmdEnv(exec.Command(exe, runArg)).CombinedOutput() + // pprofCgoTraceback is called whenever CGO code is executing and a signal + // is received. Disable signal preemption to increase the likelihood at + // least one SIGPROF signal fired to capture a sample. See issue #37201. + cmd := testenv.CleanCmdEnv(exec.Command(exe, runArg)) + cmd.Env = append(cmd.Env, "GODEBUG=asyncpreemptoff=1") + + got, err := cmd.CombinedOutput() if err != nil { if testenv.Builder() == "linux-amd64-alpine" { // See Issue 18243 and Issue 19938. 
From 7913f7dfcf2c281b99c6ddd278aa851de47ada9d Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Wed, 26 Feb 2020 11:29:34 -0800 Subject: [PATCH 15/69] cmd/compile: add specialized AddArgN functions for rewrite rules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This shrinks the compiler without impacting performance. (The performance-sensitive part of rewrite rules is the non-match case.) Passes toolstash-check -all. Executable size: file before after Δ % compile 20356168 20163960 -192208 -0.944% total 115599376 115407168 -192208 -0.166% Text size: file before after Δ % cmd/compile/internal/ssa.s 3928309 3778774 -149535 -3.807% total 18862943 18713408 -149535 -0.793% Memory allocated compiling package SSA: SSA 12.7M ± 0% 12.5M ± 0% -1.74% (p=0.008 n=5+5) Compiler speed impact: name old time/op new time/op delta Template 211ms ± 1% 211ms ± 2% ~ (p=0.832 n=49+49) Unicode 82.8ms ± 2% 83.2ms ± 2% +0.44% (p=0.022 n=46+49) GoTypes 726ms ± 1% 728ms ± 2% ~ (p=0.076 n=46+48) Compiler 3.39s ± 2% 3.40s ± 2% ~ (p=0.633 n=48+49) SSA 7.71s ± 1% 7.65s ± 1% -0.78% (p=0.000 n=45+44) Flate 134ms ± 1% 134ms ± 1% ~ (p=0.195 n=50+49) GoParser 167ms ± 1% 167ms ± 1% ~ (p=0.390 n=47+47) Reflect 453ms ± 3% 452ms ± 2% ~ (p=0.492 n=48+49) Tar 184ms ± 3% 184ms ± 2% ~ (p=0.862 n=50+48) XML 248ms ± 2% 248ms ± 2% ~ (p=0.096 n=49+47) [Geo mean] 415ms 415ms -0.03% name old user-time/op new user-time/op delta Template 273ms ± 1% 273ms ± 2% ~ (p=0.711 n=48+48) Unicode 117ms ± 6% 117ms ± 5% ~ (p=0.633 n=50+50) GoTypes 972ms ± 2% 974ms ± 1% +0.29% (p=0.016 n=47+49) Compiler 4.46s ± 6% 4.51s ± 6% ~ (p=0.093 n=50+50) SSA 10.4s ± 1% 10.3s ± 2% -0.94% (p=0.000 n=45+50) Flate 166ms ± 2% 167ms ± 2% ~ (p=0.148 n=49+48) GoParser 202ms ± 1% 202ms ± 2% -0.28% (p=0.014 n=47+49) Reflect 594ms ± 2% 594ms ± 2% ~ (p=0.717 n=48+49) Tar 224ms ± 2% 224ms ± 2% ~ (p=0.805 n=50+49) XML 311ms ± 1% 310ms ± 1% ~ (p=0.177 n=49+48) [Geo mean] 537ms 537ms +0.01% Change-Id: 
I562b9f349b34ddcff01771769e6dbbc80604da7a Reviewed-on: https://go-review.googlesource.com/c/go/+/221237 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/rulegen.go | 16 +- src/cmd/compile/internal/ssa/rewrite386.go | 2215 ++------ .../internal/ssa/rewrite386splitload.go | 27 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 4915 +++++------------ .../internal/ssa/rewriteAMD64splitload.go | 36 +- src/cmd/compile/internal/ssa/rewriteARM.go | 2747 +++------ src/cmd/compile/internal/ssa/rewriteARM64.go | 3244 ++++------- src/cmd/compile/internal/ssa/rewriteMIPS.go | 1060 ++-- src/cmd/compile/internal/ssa/rewriteMIPS64.go | 1379 ++--- src/cmd/compile/internal/ssa/rewritePPC64.go | 1785 ++---- .../compile/internal/ssa/rewriteRISCV64.go | 676 +-- src/cmd/compile/internal/ssa/rewriteS390X.go | 2565 +++------ src/cmd/compile/internal/ssa/rewriteWasm.go | 735 +-- src/cmd/compile/internal/ssa/rewritedec.go | 93 +- src/cmd/compile/internal/ssa/rewritedec64.go | 621 +-- .../compile/internal/ssa/rewritedecArgs.go | 28 +- .../compile/internal/ssa/rewritegeneric.go | 2027 +++---- src/cmd/compile/internal/ssa/value.go | 52 + 18 files changed, 7169 insertions(+), 17052 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 2a10f2fa25..0fba0546e7 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -1134,11 +1134,21 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s if aux != "" { rr.add(stmtf("%s.Aux = %s", v, aux)) } - for _, arg := range args { + all := new(strings.Builder) + for i, arg := range args { x := genResult0(rr, arch, arg, false, move, pos) - rr.add(stmtf("%s.AddArg(%s)", v, x)) + if i > 0 { + all.WriteString(", ") + } + all.WriteString(x) + } + switch len(args) { + case 0: + case 1: + rr.add(stmtf("%s.AddArg(%s)", v, all.String())) + default: 
+ rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String())) } - return v } diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 4efb2d4f74..7a75b7121f 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -791,8 +791,7 @@ func rewriteValue386_Op386ADCL(v *Value) bool { f := v_2 v.reset(Op386ADCLconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(f) + v.AddArg2(x, f) return true } break @@ -902,8 +901,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -918,8 +916,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -934,8 +931,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -953,8 +949,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { continue } v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -976,8 +971,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1_1 v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -995,8 +989,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { y := v_1 v.reset(Op386LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1019,8 +1012,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1045,9 +1037,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.reset(Op386ADDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1073,10 +1063,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { 
v.reset(Op386ADDLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -1091,8 +1078,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SUBL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1133,8 +1119,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { x := v_0.Args[0] v.reset(Op386LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL [d] {s} x)) @@ -1175,8 +1160,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) @@ -1197,8 +1181,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) @@ -1219,8 +1202,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) @@ -1241,8 +1223,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] x) @@ -1310,8 +1291,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool { v.reset(Op386ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -1333,8 +1313,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool { v.reset(Op386ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + 
v.AddArg2(base, mem) return true } return false @@ -1364,9 +1343,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ADDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -1388,9 +1365,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ADDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -1413,9 +1388,7 @@ func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -1445,9 +1418,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool { v.reset(Op386ADDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -1470,9 +1441,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool { v.reset(Op386ADDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -1496,10 +1465,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -1531,10 +1497,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool { 
v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ADDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -1557,10 +1520,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ADDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -1584,10 +1544,7 @@ func rewriteValue386_Op386ADDLloadidx4(v *Value) bool { v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -1617,9 +1574,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -1642,9 +1597,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -1676,10 +1629,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ADDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -1702,10 +1652,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) 
return true } // match: (ADDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -1729,10 +1676,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ADDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -1754,9 +1698,7 @@ func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -1786,9 +1728,7 @@ func rewriteValue386_Op386ADDSD(v *Value) bool { v.reset(Op386ADDSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1820,9 +1760,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool { v.reset(Op386ADDSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -1845,9 +1783,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool { v.reset(Op386ADDSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -1877,9 +1813,7 @@ func rewriteValue386_Op386ADDSS(v *Value) bool { v.reset(Op386ADDSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1911,9 +1845,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool { v.reset(Op386ADDSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -1936,9 +1868,7 @@ func 
rewriteValue386_Op386ADDSSload(v *Value) bool { v.reset(Op386ADDSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -1982,9 +1912,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool { v.reset(Op386ANDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2010,10 +1938,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -2111,8 +2036,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool { v.reset(Op386ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -2134,8 +2058,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool { v.reset(Op386ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2165,9 +2088,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ANDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -2189,9 +2110,7 @@ func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ANDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -2214,9 +2133,7 @@ func 
rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -2246,9 +2163,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool { v.reset(Op386ANDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -2271,9 +2186,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool { v.reset(Op386ANDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -2297,10 +2210,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -2332,10 +2242,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ANDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -2358,10 +2265,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ANDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -2385,10 +2289,7 @@ func rewriteValue386_Op386ANDLloadidx4(v *Value) bool { v.reset(Op386ANDLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - 
v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -2418,9 +2319,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -2443,9 +2342,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2477,10 +2374,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ANDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -2503,10 +2397,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ANDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -2530,10 +2421,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ANDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -2555,9 +2443,7 @@ func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -2605,8 +2491,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool { } 
v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -2629,9 +2514,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool { v.reset(Op386CMPBload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) @@ -2654,9 +2537,7 @@ func rewriteValue386_Op386CMPB(v *Value) bool { v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -2772,8 +2653,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { break } v.reset(Op386TESTB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPBconst l:(ANDLconst [c] x) [0]) @@ -2805,8 +2685,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { } x := v_0 v.reset(Op386TESTB) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) @@ -2831,8 +2710,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -2859,8 +2737,7 @@ func rewriteValue386_Op386CMPBload(v *Value) bool { v.reset(Op386CMPBconstload) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2908,8 +2785,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool { } v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -2932,9 +2808,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool { v.reset(Op386CMPLload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // 
match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) @@ -2957,9 +2831,7 @@ func rewriteValue386_Op386CMPL(v *Value) bool { v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -3090,8 +2962,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { break } v.reset(Op386TESTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPLconst l:(ANDLconst [c] x) [0]) @@ -3123,8 +2994,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { } x := v_0 v.reset(Op386TESTL) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) @@ -3149,8 +3019,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -3177,8 +3046,7 @@ func rewriteValue386_Op386CMPLload(v *Value) bool { v.reset(Op386CMPLconstload) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3226,8 +3094,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool { } v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -3250,9 +3117,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool { v.reset(Op386CMPWload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) @@ -3275,9 +3140,7 @@ func rewriteValue386_Op386CMPW(v *Value) bool { v0 := b.NewValue0(l.Pos, Op386CMPWload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -3393,8 +3256,7 @@ func 
rewriteValue386_Op386CMPWconst(v *Value) bool { break } v.reset(Op386TESTW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWconst l:(ANDLconst [c] x) [0]) @@ -3426,8 +3288,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { } x := v_0 v.reset(Op386TESTW) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) @@ -3452,8 +3313,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -3480,8 +3340,7 @@ func rewriteValue386_Op386CMPWload(v *Value) bool { v.reset(Op386CMPWconstload) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3510,9 +3369,7 @@ func rewriteValue386_Op386DIVSD(v *Value) bool { v.reset(Op386DIVSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -3542,9 +3399,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool { v.reset(Op386DIVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -3567,9 +3422,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool { v.reset(Op386DIVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -3598,9 +3451,7 @@ func rewriteValue386_Op386DIVSS(v *Value) bool { v.reset(Op386DIVSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -3630,9 +3481,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool { v.reset(Op386DIVSSload) v.AuxInt = off1 + off2 v.Aux = sym - 
v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -3655,9 +3504,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool { v.reset(Op386DIVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -3705,8 +3552,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3751,8 +3597,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) @@ -3774,8 +3619,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) @@ -3797,8 +3641,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) @@ -3820,8 +3663,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3848,8 +3690,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3868,8 +3709,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3888,8 +3728,7 @@ func 
rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3908,8 +3747,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3934,8 +3772,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3963,8 +3800,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) @@ -3985,8 +3821,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = c + 2*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) @@ -4002,8 +3837,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) @@ -4019,8 +3853,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -4042,8 +3875,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool { v.reset(Op386LEAL2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4069,8 +3901,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) @@ -4091,8 +3922,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = c + 4*d v.Aux 
= s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) @@ -4108,8 +3938,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -4131,8 +3960,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool { v.reset(Op386LEAL4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4158,8 +3986,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) @@ -4180,8 +4007,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = c + 8*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -4203,8 +4029,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool { v.reset(Op386LEAL8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4233,8 +4058,7 @@ func rewriteValue386_Op386MOVBLSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBLSX (ANDLconst [c] x)) @@ -4302,8 +4126,7 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool { v.reset(Op386MOVBLSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4332,8 +4155,7 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) @@ -4358,9 +4180,7 @@ func 
rewriteValue386_Op386MOVBLZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVBLZX (ANDLconst [c] x)) @@ -4423,8 +4243,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -4446,8 +4265,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -4470,9 +4288,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off] {sym} (ADDL ptr idx) mem) @@ -4497,9 +4313,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -4539,9 +4353,7 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -4562,9 +4374,7 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool { v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -4591,9 +4401,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: 
(MOVBstore [off] {sym} ptr (MOVBLZX x) mem) @@ -4610,9 +4418,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -4634,9 +4440,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -4657,8 +4461,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -4681,9 +4484,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -4707,10 +4508,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem) @@ -4736,10 +4534,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -4766,9 +4561,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // 
match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -4793,9 +4586,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem)) @@ -4821,9 +4612,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem)) @@ -4849,9 +4638,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) @@ -4881,9 +4668,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -4911,8 +4696,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -4934,8 +4718,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) @@ -4958,9 +4741,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) @@ -4977,9 +4758,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) @@ -5004,8 +4783,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) @@ -5030,8 +4808,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -5055,9 +4832,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -5075,9 +4850,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool { v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) @@ -5103,9 +4876,7 @@ func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } return 
false @@ -5132,10 +4903,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -5157,10 +4925,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -5192,10 +4957,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5228,10 +4990,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5265,10 +5024,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5302,10 +5058,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -5343,10 +5096,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -5400,8 +5150,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ 
-5423,8 +5172,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -5447,9 +5195,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) @@ -5472,9 +5218,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off] {sym} (ADDL ptr idx) mem) @@ -5499,9 +5243,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5540,9 +5282,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5563,9 +5303,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5586,9 +5324,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.reset(Op386MOVLloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -5614,9 +5350,7 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -5634,9 +5368,7 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { v.reset(Op386MOVLloadidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -5666,9 +5398,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -5689,8 +5419,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -5713,9 +5442,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -5739,10 +5466,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) @@ -5766,10 +5490,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem) @@ -5795,10 +5516,7 @@ func 
rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -5822,9 +5540,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) @@ -5846,9 +5562,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) @@ -5870,9 +5584,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) @@ -5894,9 +5606,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -5929,9 +5639,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -5959,9 +5667,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386SUBLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -5994,9 +5700,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ANDLmodify) 
v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -6031,9 +5735,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -6068,9 +5770,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -6098,8 +5798,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ADDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) @@ -6125,8 +5824,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ANDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) @@ -6152,8 +5850,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386ORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) @@ -6179,8 +5876,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { v.reset(Op386XORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6208,8 +5904,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) 
mem) @@ -6231,8 +5926,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) @@ -6255,9 +5949,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) @@ -6280,9 +5972,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) @@ -6299,9 +5989,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6324,9 +6012,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) @@ -6344,9 +6030,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -6364,9 +6048,7 @@ func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6390,9 +6072,7 @@ func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -6410,9 +6090,7 @@ func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool { v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6438,10 +6116,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -6463,10 +6138,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -6488,10 +6160,7 @@ func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -6519,10 +6188,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -6541,10 +6207,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386MOVLstoreidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - 
v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6567,10 +6230,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6593,10 +6253,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6619,10 +6276,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLloadidx4 x [off] {sym} ptr idx mem) mem) @@ -6645,10 +6299,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) @@ -6682,10 +6333,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ADDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6714,10 +6362,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: 
(MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) @@ -6751,10 +6396,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ANDLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6790,10 +6432,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6829,10 +6468,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } break @@ -6861,9 +6497,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) @@ -6890,9 +6524,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ANDLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) @@ -6919,9 +6551,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) @@ -6948,9 +6578,7 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { 
v.reset(Op386XORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6998,8 +6626,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -7021,8 +6648,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -7045,9 +6671,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) @@ -7070,9 +6694,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem) @@ -7097,9 +6719,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -7125,9 +6745,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7145,9 +6763,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { v.reset(Op386MOVSDloadidx1) v.AuxInt = 
int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7171,9 +6787,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { v.reset(Op386MOVSDloadidx8) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7191,9 +6805,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { v.reset(Op386MOVSDloadidx8) v.AuxInt = int64(int32(c + 8*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7223,9 +6835,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -7248,9 +6858,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -7274,10 +6882,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) @@ -7301,10 +6906,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem) @@ -7330,10 +6932,7 @@ 
func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -7361,10 +6960,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7383,10 +6979,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { v.reset(Op386MOVSDstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7412,10 +7005,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { v.reset(Op386MOVSDstoreidx8) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7434,10 +7024,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { v.reset(Op386MOVSDstoreidx8) v.AuxInt = int64(int32(c + 8*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7485,8 +7072,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -7508,8 +7094,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) 
mem) @@ -7532,9 +7117,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) @@ -7557,9 +7140,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem) @@ -7584,9 +7165,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -7612,9 +7191,7 @@ func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7632,9 +7209,7 @@ func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool { v.reset(Op386MOVSSloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7658,9 +7233,7 @@ func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool { v.reset(Op386MOVSSloadidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -7678,9 +7251,7 @@ func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool { v.reset(Op386MOVSSloadidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7710,9 +7281,7 @@ func 
rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -7735,9 +7304,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -7761,10 +7328,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) @@ -7788,10 +7352,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem) @@ -7817,10 +7378,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -7848,10 +7406,7 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7870,10 +7425,7 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { v.reset(Op386MOVSSstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7899,10 +7451,7 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { v.reset(Op386MOVSSstoreidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -7921,10 +7470,7 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { v.reset(Op386MOVSSstoreidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7953,8 +7499,7 @@ func rewriteValue386_Op386MOVWLSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWLSX (ANDLconst [c] x)) @@ -8022,8 +7567,7 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool { v.reset(Op386MOVWLSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8052,8 +7596,7 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) @@ -8078,9 +7621,7 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) @@ -8105,9 +7646,7 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWLZX (ANDLconst [c] x)) @@ -8170,8 +7709,7 @@ func 
rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -8193,8 +7731,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) @@ -8217,9 +7754,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) @@ -8242,9 +7777,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADDL ptr idx) mem) @@ -8269,9 +7802,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8310,9 +7841,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8333,9 +7862,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8356,9 +7883,7 @@ func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { v.reset(Op386MOVWloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8384,9 +7909,7 @@ func rewriteValue386_Op386MOVWloadidx2(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) @@ -8404,9 +7927,7 @@ func rewriteValue386_Op386MOVWloadidx2(v *Value) bool { v.reset(Op386MOVWloadidx2) v.AuxInt = int64(int32(c + 2*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8431,9 +7952,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) @@ -8450,9 +7969,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -8474,9 +7991,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -8497,8 +8012,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -8521,9 +8035,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // 
match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) @@ -8547,10 +8059,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) @@ -8574,10 +8083,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem) @@ -8603,10 +8109,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -8633,9 +8136,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) @@ -8665,9 +8166,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { v.reset(Op386MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -8695,8 +8194,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -8718,8 +8216,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - 
v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) @@ -8742,9 +8239,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) @@ -8767,9 +8262,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) @@ -8786,9 +8279,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) @@ -8813,8 +8304,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) @@ -8839,8 +8329,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -8863,9 +8352,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] 
ptr) idx mem) @@ -8883,9 +8370,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -8903,9 +8388,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) @@ -8931,9 +8414,7 @@ func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } return false @@ -8958,9 +8439,7 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) @@ -8978,9 +8457,7 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) @@ -9006,12 +8483,10 @@ func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { v.reset(Op386MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, Op386SHLLconst, i.Type) v0.AuxInt = 1 v0.AddArg(i) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, 
mem) return true } return false @@ -9037,10 +8512,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9062,10 +8534,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9087,10 +8556,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVWstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9122,10 +8588,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -9163,10 +8626,7 @@ func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -9196,10 +8656,7 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = int64(int32(c + d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) @@ -9218,10 +8675,7 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVWstoreidx2) v.AuxInt = int64(int32(c + 2*d)) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) @@ -9247,13 
+8701,10 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) @@ -9284,13 +8735,10 @@ func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { v.reset(Op386MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, v0, w0, mem) return true } return false @@ -9334,9 +8782,7 @@ func rewriteValue386_Op386MULL(v *Value) bool { v.reset(Op386MULLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -9362,10 +8808,7 @@ func rewriteValue386_Op386MULL(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -9398,8 +8841,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9412,8 +8854,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9426,8 +8867,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9472,8 +8912,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) - 
v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [5] x) @@ -9484,8 +8923,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [7] x) @@ -9496,11 +8934,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [9] x) @@ -9511,8 +8947,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [11] x) @@ -9523,11 +8958,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [13] x) @@ -9538,11 +8971,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [19] x) @@ -9553,11 +8984,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [21] x) @@ -9568,11 +8997,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [25] x) @@ -9583,11 +9010,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x 
:= v_0 v.reset(Op386LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [27] x) @@ -9599,13 +9024,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [37] x) @@ -9616,11 +9038,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [41] x) @@ -9631,11 +9051,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [45] x) @@ -9647,13 +9065,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [73] x) @@ -9664,11 +9079,9 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { } x := v_0 v.reset(Op386LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [81] x) @@ -9680,13 +9093,10 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, 
Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [c] x) @@ -9702,8 +9112,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9719,8 +9128,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9736,8 +9144,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 2) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9753,8 +9160,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 4) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9770,8 +9176,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) v0.AuxInt = log2(c - 8) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -9786,8 +9191,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v.reset(Op386SHLLconst) v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9803,8 +9207,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v.reset(Op386SHLLconst) v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ 
-9820,8 +9223,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { v.reset(Op386SHLLconst) v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -9864,9 +9266,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool { v.reset(Op386MULLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -9889,9 +9289,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool { v.reset(Op386MULLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -9915,10 +9313,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -9950,10 +9345,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (MULLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -9976,10 +9368,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (MULLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -10003,10 +9392,7 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool { v.reset(Op386MULLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -10036,9 +9422,7 @@ func rewriteValue386_Op386MULSD(v *Value) bool { v.reset(Op386MULSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -10070,9 +9454,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool { v.reset(Op386MULSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -10095,9 +9477,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool { v.reset(Op386MULSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -10127,9 +9507,7 @@ func rewriteValue386_Op386MULSS(v *Value) bool { v.reset(Op386MULSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -10161,9 +9539,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool { v.reset(Op386MULSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -10186,9 +9562,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool { v.reset(Op386MULSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -10338,9 +9712,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.reset(Op386ORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -10366,10 +9738,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -10421,8 +9790,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -10486,8 +9854,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -10535,9 +9902,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -10613,9 +9978,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -10691,8 +10054,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool { v.reset(Op386ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -10714,8 +10076,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool { v.reset(Op386ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -10745,9 +10106,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (ORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -10769,9 +10128,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true 
} // match: (ORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -10794,9 +10151,7 @@ func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -10826,9 +10181,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool { v.reset(Op386ORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -10851,9 +10204,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool { v.reset(Op386ORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -10877,10 +10228,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -10912,10 +10260,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -10938,10 +10283,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool { v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (ORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -10965,10 +10307,7 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool { 
v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -10998,9 +10337,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -11023,9 +10360,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool { v.reset(Op386ORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -11057,10 +10392,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -11083,10 +10415,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -11110,10 +10439,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (ORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -11135,9 +10461,7 @@ func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool { v.reset(Op386ORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) 
return true } return false @@ -11303,8 +10627,7 @@ func rewriteValue386_Op386SARL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -11398,8 +10721,7 @@ func rewriteValue386_Op386SBBL(v *Value) bool { f := v_2 v.reset(Op386SBBLconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(f) + v.AddArg2(x, f) return true } return false @@ -12133,8 +11455,7 @@ func rewriteValue386_Op386SHLL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12233,8 +11554,7 @@ func rewriteValue386_Op386SHRL(v *Value) bool { } y := v_1.Args[0] v.reset(Op386SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12359,9 +11679,7 @@ func rewriteValue386_Op386SUBL(v *Value) bool { v.reset(Op386SUBLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) @@ -12384,10 +11702,7 @@ func rewriteValue386_Op386SUBL(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } // match: (SUBL x x) @@ -12473,9 +11788,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool { v.reset(Op386SUBLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -12498,9 +11811,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool { v.reset(Op386SUBLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -12524,10 +11835,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool { 
v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -12559,10 +11867,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (SUBLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -12585,10 +11890,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (SUBLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -12612,10 +11914,7 @@ func rewriteValue386_Op386SUBLloadidx4(v *Value) bool { v.reset(Op386SUBLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -12645,9 +11944,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool { v.reset(Op386SUBLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -12670,9 +11967,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool { v.reset(Op386SUBLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -12704,10 +11999,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (SUBLmodifyidx4 
[off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -12730,10 +12022,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (SUBLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -12757,10 +12046,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -12782,9 +12068,7 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool { v.reset(Op386ADDLconstmodifyidx4) v.AuxInt = makeValAndOff(-c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -12813,9 +12097,7 @@ func rewriteValue386_Op386SUBSD(v *Value) bool { v.reset(Op386SUBSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -12845,9 +12127,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool { v.reset(Op386SUBSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -12870,9 +12150,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool { v.reset(Op386SUBSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -12901,9 +12179,7 @@ func rewriteValue386_Op386SUBSS(v *Value) bool { v.reset(Op386SUBSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ 
-12933,9 +12209,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool { v.reset(Op386SUBSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -12958,9 +12232,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool { v.reset(Op386SUBSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -13078,9 +12350,7 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.reset(Op386XORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -13106,10 +12376,7 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(x, ptr, idx, mem) return true } break @@ -13194,8 +12461,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool { v.reset(Op386XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) @@ -13217,8 +12483,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool { v.reset(Op386XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -13248,9 +12513,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (XORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) @@ -13272,9 +12535,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v 
*Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2 * 4) v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } // match: (XORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) @@ -13297,9 +12558,7 @@ func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(base, idx, mem) return true } return false @@ -13329,9 +12588,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool { v.reset(Op386XORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) @@ -13354,9 +12611,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool { v.reset(Op386XORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) @@ -13380,10 +12635,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, ptr, idx, mem) return true } return false @@ -13415,10 +12667,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } // match: (XORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) @@ -13441,10 +12690,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) 
+ v.AddArg4(val, base, idx, mem) return true } // match: (XORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) @@ -13468,10 +12714,7 @@ func rewriteValue386_Op386XORLloadidx4(v *Value) bool { v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg4(val, base, idx, mem) return true } return false @@ -13501,9 +12744,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -13526,9 +12767,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool { v.reset(Op386XORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -13560,10 +12799,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (XORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) @@ -13586,10 +12822,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off1 + off2*4 v.Aux = sym - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (XORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) @@ -13613,10 +12846,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(base, idx, val, mem) return true } // match: (XORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) @@ -13638,9 
+12868,7 @@ func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool { v.reset(Op386XORLconstmodifyidx4) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -13683,10 +12911,9 @@ func rewriteValue386_OpDiv8(v *Value) bool { v.reset(Op386DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -13703,10 +12930,9 @@ func rewriteValue386_OpDiv8u(v *Value) bool { v.reset(Op386DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -13721,8 +12947,7 @@ func rewriteValue386_OpEq16(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13738,8 +12963,7 @@ func rewriteValue386_OpEq32(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13755,8 +12979,7 @@ func rewriteValue386_OpEq32F(v *Value) bool { y := v_1 v.reset(Op386SETEQF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13772,8 +12995,7 @@ func rewriteValue386_OpEq64F(v *Value) bool { y := v_1 v.reset(Op386SETEQF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13789,8 +13011,7 @@ func rewriteValue386_OpEq8(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13806,8 +13027,7 @@ func 
rewriteValue386_OpEqB(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13823,8 +13043,7 @@ func rewriteValue386_OpEqPtr(v *Value) bool { y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13840,8 +13059,7 @@ func rewriteValue386_OpGeq32F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13857,8 +13075,7 @@ func rewriteValue386_OpGeq64F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13874,8 +13091,7 @@ func rewriteValue386_OpGreater32F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13891,8 +13107,7 @@ func rewriteValue386_OpGreater64F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13908,8 +13123,7 @@ func rewriteValue386_OpIsInBounds(v *Value) bool { len := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -13923,8 +13137,7 @@ func rewriteValue386_OpIsNonNil(v *Value) bool { p := v_0 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags) - v0.AddArg(p) - v0.AddArg(p) + v0.AddArg2(p, p) v.AddArg(v0) return true } @@ -13940,8 +13153,7 @@ func rewriteValue386_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPL, 
types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -13957,8 +13169,7 @@ func rewriteValue386_OpLeq16(v *Value) bool { y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13974,8 +13185,7 @@ func rewriteValue386_OpLeq16U(v *Value) bool { y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13991,8 +13201,7 @@ func rewriteValue386_OpLeq32(v *Value) bool { y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14008,8 +13217,7 @@ func rewriteValue386_OpLeq32F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14025,8 +13233,7 @@ func rewriteValue386_OpLeq32U(v *Value) bool { y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14042,8 +13249,7 @@ func rewriteValue386_OpLeq64F(v *Value) bool { y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14059,8 +13265,7 @@ func rewriteValue386_OpLeq8(v *Value) bool { y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14076,8 +13281,7 @@ func rewriteValue386_OpLeq8U(v *Value) bool { y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14093,8 +13297,7 @@ func rewriteValue386_OpLess16(v *Value) 
bool { y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14110,8 +13313,7 @@ func rewriteValue386_OpLess16U(v *Value) bool { y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14127,8 +13329,7 @@ func rewriteValue386_OpLess32(v *Value) bool { y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14144,8 +13345,7 @@ func rewriteValue386_OpLess32F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14161,8 +13361,7 @@ func rewriteValue386_OpLess32U(v *Value) bool { y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14178,8 +13377,7 @@ func rewriteValue386_OpLess64F(v *Value) bool { y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -14195,8 +13393,7 @@ func rewriteValue386_OpLess8(v *Value) bool { y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14212,8 +13409,7 @@ func rewriteValue386_OpLess8U(v *Value) bool { y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14232,8 +13428,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVLload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14247,8 +13442,7 @@ func 
rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14262,8 +13456,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14277,8 +13470,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVSSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -14292,8 +13484,7 @@ func rewriteValue386_OpLoad(v *Value) bool { break } v.reset(Op386MOVSDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -14327,15 +13518,13 @@ func rewriteValue386_OpLsh16x16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x16 x y) @@ -14350,8 +13539,7 @@ func rewriteValue386_OpLsh16x16(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14372,15 +13560,13 @@ func rewriteValue386_OpLsh16x32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x32 x y) @@ -14395,8 +13581,7 @@ func rewriteValue386_OpLsh16x32(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14454,15 +13639,13 @@ func rewriteValue386_OpLsh16x8(v *Value) 
bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x8 x y) @@ -14477,8 +13660,7 @@ func rewriteValue386_OpLsh16x8(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14499,15 +13681,13 @@ func rewriteValue386_OpLsh32x16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x16 x y) @@ -14522,8 +13702,7 @@ func rewriteValue386_OpLsh32x16(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14544,15 +13723,13 @@ func rewriteValue386_OpLsh32x32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x32 x y) @@ -14567,8 +13744,7 @@ func rewriteValue386_OpLsh32x32(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14626,15 +13802,13 @@ func rewriteValue386_OpLsh32x8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, 
Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x8 x y) @@ -14649,8 +13823,7 @@ func rewriteValue386_OpLsh32x8(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14671,15 +13844,13 @@ func rewriteValue386_OpLsh8x16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x16 x y) @@ -14694,8 +13865,7 @@ func rewriteValue386_OpLsh8x16(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14716,15 +13886,13 @@ func rewriteValue386_OpLsh8x32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x32 x y) @@ -14739,8 +13907,7 @@ func rewriteValue386_OpLsh8x32(v *Value) bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14798,15 +13965,13 @@ func rewriteValue386_OpLsh8x8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x8 x y) @@ -14821,8 +13986,7 @@ func rewriteValue386_OpLsh8x8(v *Value) 
bool { } v.reset(Op386SHLL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14840,10 +14004,9 @@ func rewriteValue386_OpMod8(v *Value) bool { v.reset(Op386MODW) v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14860,10 +14023,9 @@ func rewriteValue386_OpMod8u(v *Value) bool { v.reset(Op386MODWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14896,12 +14058,9 @@ func rewriteValue386_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(Op386MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -14914,12 +14073,9 @@ func rewriteValue386_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(Op386MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -14932,12 +14088,9 @@ func rewriteValue386_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(Op386MOVLstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [3] dst src mem) @@ -14951,20 +14104,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := 
b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -14978,20 +14125,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -15005,20 +14146,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -15032,20 +14167,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVLstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - 
v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] dst src mem) @@ -15059,20 +14188,14 @@ func rewriteValue386_OpMove(v *Value) bool { mem := v_2 v.reset(Op386MOVLstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -15091,19 +14214,14 @@ func rewriteValue386_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type) v0.AuxInt = s % 4 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, Op386ADDLconst, src.Type) v1.AuxInt = s % 4 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -15119,9 +14237,7 @@ func rewriteValue386_OpMove(v *Value) bool { } v.reset(Op386DUFFCOPY) v.AuxInt = 10 * (128 - s/4) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -15136,12 +14252,9 @@ func rewriteValue386_OpMove(v *Value) bool { break } v.reset(Op386REPMOVSL) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v0.AuxInt = s / 4 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -15160,10 +14273,9 @@ func rewriteValue386_OpNeg32F(v *Value) bool { break } v.reset(Op386PXOR) - v.AddArg(x) v0 := 
b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32) v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1))) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Neg32F x) @@ -15194,10 +14306,9 @@ func rewriteValue386_OpNeg64F(v *Value) bool { break } v.reset(Op386PXOR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64) v0.AuxInt = auxFrom64F(math.Copysign(0, -1)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Neg64F x) @@ -15225,8 +14336,7 @@ func rewriteValue386_OpNeq16(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15242,8 +14352,7 @@ func rewriteValue386_OpNeq32(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15259,8 +14368,7 @@ func rewriteValue386_OpNeq32F(v *Value) bool { y := v_1 v.reset(Op386SETNEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15276,8 +14384,7 @@ func rewriteValue386_OpNeq64F(v *Value) bool { y := v_1 v.reset(Op386SETNEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15293,8 +14400,7 @@ func rewriteValue386_OpNeq8(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15310,8 +14416,7 @@ func rewriteValue386_OpNeqB(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15327,8 +14432,7 @@ func rewriteValue386_OpNeqPtr(v *Value) bool { y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15362,9 +14466,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool { } v.reset(Op386LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -15380,9 +14482,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool { } v.reset(Op386LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -15398,9 +14498,7 @@ func rewriteValue386_OpPanicBounds(v *Value) bool { } v.reset(Op386LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -15424,10 +14522,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } v.reset(Op386LoweredPanicExtendA) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -15444,10 +14539,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } v.reset(Op386LoweredPanicExtendB) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -15464,10 +14556,7 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } v.reset(Op386LoweredPanicExtendC) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } return false @@ -15542,15 +14631,13 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux16 x y) @@ 
-15565,8 +14652,7 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool { } v.reset(Op386SHRW) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15587,15 +14673,13 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux32 x y) @@ -15610,8 +14694,7 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool { } v.reset(Op386SHRW) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15669,15 +14752,13 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux8 x y) @@ -15692,8 +14773,7 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool { } v.reset(Op386SHRW) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15714,9 +14794,7 @@ func rewriteValue386_OpRsh16x16(v *Value) bool { } v.reset(Op386SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) @@ -15724,8 +14802,8 @@ func rewriteValue386_OpRsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x16 x y) @@ -15738,8 +14816,7 @@ func rewriteValue386_OpRsh16x16(v *Value) 
bool { break } v.reset(Op386SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15760,9 +14837,7 @@ func rewriteValue386_OpRsh16x32(v *Value) bool { } v.reset(Op386SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) @@ -15770,8 +14845,8 @@ func rewriteValue386_OpRsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x32 x y) @@ -15784,8 +14859,7 @@ func rewriteValue386_OpRsh16x32(v *Value) bool { break } v.reset(Op386SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15845,9 +14919,7 @@ func rewriteValue386_OpRsh16x8(v *Value) bool { } v.reset(Op386SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) @@ -15855,8 +14927,8 @@ func rewriteValue386_OpRsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x8 x y) @@ -15869,8 +14941,7 @@ func rewriteValue386_OpRsh16x8(v *Value) bool { break } v.reset(Op386SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15891,15 +14962,13 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux16 x y) @@ -15914,8 
+14983,7 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool { } v.reset(Op386SHRL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -15936,15 +15004,13 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux32 x y) @@ -15959,8 +15025,7 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool { } v.reset(Op386SHRL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16018,15 +15083,13 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux8 x y) @@ -16041,8 +15104,7 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool { } v.reset(Op386SHRL) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16063,9 +15125,7 @@ func rewriteValue386_OpRsh32x16(v *Value) bool { } v.reset(Op386SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) @@ -16073,8 +15133,8 @@ func rewriteValue386_OpRsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 x y) @@ -16087,8 +15147,7 @@ func rewriteValue386_OpRsh32x16(v *Value) bool { 
break } v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16109,9 +15168,7 @@ func rewriteValue386_OpRsh32x32(v *Value) bool { } v.reset(Op386SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) @@ -16119,8 +15176,8 @@ func rewriteValue386_OpRsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 x y) @@ -16133,8 +15190,7 @@ func rewriteValue386_OpRsh32x32(v *Value) bool { break } v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16194,9 +15250,7 @@ func rewriteValue386_OpRsh32x8(v *Value) bool { } v.reset(Op386SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) @@ -16204,8 +15258,8 @@ func rewriteValue386_OpRsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 x y) @@ -16218,8 +15272,7 @@ func rewriteValue386_OpRsh32x8(v *Value) bool { break } v.reset(Op386SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16240,15 +15293,13 @@ func rewriteValue386_OpRsh8Ux16(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux16 x y) @@ -16263,8 +15314,7 @@ 
func rewriteValue386_OpRsh8Ux16(v *Value) bool { } v.reset(Op386SHRB) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16285,15 +15335,13 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux32 x y) @@ -16308,8 +15356,7 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool { } v.reset(Op386SHRB) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16367,15 +15414,13 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool { } v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux8 x y) @@ -16390,8 +15435,7 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool { } v.reset(Op386SHRB) v.Type = t - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16412,9 +15456,7 @@ func rewriteValue386_OpRsh8x16(v *Value) bool { } v.reset(Op386SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) @@ -16422,8 +15464,8 @@ func rewriteValue386_OpRsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x16 x y) @@ -16436,8 +15478,7 @@ func rewriteValue386_OpRsh8x16(v *Value) bool { break } v.reset(Op386SARB) - 
v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16458,9 +15499,7 @@ func rewriteValue386_OpRsh8x32(v *Value) bool { } v.reset(Op386SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) @@ -16468,8 +15507,8 @@ func rewriteValue386_OpRsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x32 x y) @@ -16482,8 +15521,7 @@ func rewriteValue386_OpRsh8x32(v *Value) bool { break } v.reset(Op386SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16543,9 +15581,7 @@ func rewriteValue386_OpRsh8x8(v *Value) bool { } v.reset(Op386SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) @@ -16553,8 +15589,8 @@ func rewriteValue386_OpRsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x8 x y) @@ -16567,8 +15603,7 @@ func rewriteValue386_OpRsh8x8(v *Value) bool { break } v.reset(Op386SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -16588,8 +15623,7 @@ func rewriteValue386_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16610,8 +15644,7 @@ func rewriteValue386_OpSelect1(v *Value) bool { v.reset(Op386SETO) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, 
types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -16662,9 +15695,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVSDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16679,9 +15710,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVSSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16696,9 +15725,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVLstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16713,9 +15740,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -16730,9 +15755,7 @@ func rewriteValue386_OpStore(v *Value) bool { break } v.reset(Op386MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -16765,8 +15788,7 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -16779,8 +15801,7 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVWstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) @@ -16793,8 +15814,7 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -16807,12 +15827,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 
v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) @@ -16825,12 +15843,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -16843,12 +15859,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -16861,12 +15875,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 3) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -16884,12 +15896,10 @@ func rewriteValue386_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32) v0.AuxInt = s % 4 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [8] destptr mem) @@ -16902,12 +15912,10 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = 
makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [12] destptr mem) @@ -16920,16 +15928,13 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 4) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [16] destptr mem) @@ -16942,20 +15947,16 @@ func rewriteValue386_OpZero(v *Value) bool { mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 12) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 8) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v1.AuxInt = makeValAndOff(0, 4) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -16970,11 +15971,9 @@ func rewriteValue386_OpZero(v *Value) bool { } v.reset(Op386DUFFZERO) v.AuxInt = 1 * (128 - s/4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [s] destptr mem) @@ -16988,14 +15987,11 @@ func rewriteValue386_OpZero(v *Value) bool { break } v.reset(Op386REPSTOSL) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, Op386MOVLconst, 
typ.UInt32) v0.AuxInt = s / 4 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(destptr, v0, v1, mem) return true } return false @@ -17292,8 +16288,7 @@ func rewriteBlock386(b *Block) bool { cond := b.Controls[0] b.Reset(Block386NE) v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags) - v0.AddArg(cond) - v0.AddArg(cond) + v0.AddArg2(cond, cond) b.AddControl(v0) return true } diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go index cce1b2d05a..f82eae99ab 100644 --- a/src/cmd/compile/internal/ssa/rewrite386splitload.go +++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go @@ -37,8 +37,7 @@ func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -61,10 +60,8 @@ func rewriteValue386splitload_Op386CMPBload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -85,8 +82,7 @@ func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -109,10 +105,8 @@ func rewriteValue386splitload_Op386CMPLload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -133,8 +127,7 @@ func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - 
v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -157,10 +150,8 @@ func rewriteValue386splitload_Op386CMPWload(v *Value) bool { v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c0329c1528..566a7aaf66 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1265,8 +1265,7 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool { } v.reset(OpAMD64ADCQconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(carry) + v.AddArg2(x, carry) return true } break @@ -1280,8 +1279,7 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool { break } v.reset(OpAMD64ADDQcarry) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1407,8 +1405,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAL8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1423,8 +1420,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAL4) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1439,8 +1435,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1458,8 +1453,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { continue } v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1481,8 +1475,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1_1 v.reset(OpAMD64LEAL2) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1500,8 +1493,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { y := v_1 v.reset(OpAMD64LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } break @@ -1524,8 +1516,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1540,8 +1531,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64SUBL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1566,9 +1556,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.reset(OpAMD64ADDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -1588,8 +1576,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (SHLLconst [1] x)) @@ -1602,8 +1589,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAL1) v.AuxInt = c - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (ADDLconst [c] (LEAL [d] {s} x)) @@ -1644,8 +1630,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) @@ -1666,8 +1651,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) @@ -1688,8 +1672,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) @@ -1710,8 +1693,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDLconst 
[c] x) @@ -1790,8 +1772,7 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool { v.reset(OpAMD64ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -1813,8 +1794,7 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool { v.reset(OpAMD64ADDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -1844,9 +1824,7 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { v.reset(OpAMD64ADDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -1869,9 +1847,7 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { v.reset(OpAMD64ADDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -1890,10 +1866,9 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ADDL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -1921,9 +1896,7 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -1946,9 +1919,7 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, 
val, mem) return true } return false @@ -2030,8 +2001,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2046,8 +2016,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2062,8 +2031,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2081,8 +2049,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { continue } v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2104,8 +2071,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1_1 v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -2123,8 +2089,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { y := v_1 v.reset(OpAMD64LEAQ1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2147,8 +2112,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2163,8 +2127,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64SUBQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2189,9 +2152,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.reset(OpAMD64ADDQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2236,8 +2197,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { x := v_0.Args[0] v.reset(OpAMD64LEAQ1) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (SHLQconst [1] x)) @@ -2250,8 +2210,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) 
bool { x := v_0.Args[0] v.reset(OpAMD64LEAQ1) v.AuxInt = c - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (ADDQconst [c] (LEAQ [d] {s} x)) @@ -2292,8 +2251,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) @@ -2314,8 +2272,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) @@ -2336,8 +2293,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) @@ -2358,8 +2314,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDQconst [0] x) @@ -2440,8 +2395,7 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool { v.reset(OpAMD64ADDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -2463,8 +2417,7 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool { v.reset(OpAMD64ADDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2494,9 +2447,7 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { v.reset(OpAMD64ADDQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -2519,9 +2470,7 @@ func 
rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { v.reset(OpAMD64ADDQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -2540,10 +2489,9 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ADDQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -2571,9 +2519,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -2596,9 +2542,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2626,9 +2570,7 @@ func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool { v.reset(OpAMD64ADDSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2660,9 +2602,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { v.reset(OpAMD64ADDSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -2685,9 +2625,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { v.reset(OpAMD64ADDSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) @@ -2706,10 +2644,9 @@ func 
rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ADDSD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -2737,9 +2674,7 @@ func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool { v.reset(OpAMD64ADDSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -2771,9 +2706,7 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { v.reset(OpAMD64ADDSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -2796,9 +2729,7 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { v.reset(OpAMD64ADDSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) @@ -2817,10 +2748,9 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ADDSS) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -2846,8 +2776,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { } x := v_1 v.reset(OpAMD64BTRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2920,9 +2849,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { v.reset(OpAMD64ANDLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -3056,8 +2983,7 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool { v.reset(OpAMD64ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ANDLconstmodify 
[valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3079,8 +3005,7 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool { v.reset(OpAMD64ANDLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3110,9 +3035,7 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { v.reset(OpAMD64ANDLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -3135,9 +3058,7 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { v.reset(OpAMD64ANDLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -3156,10 +3077,9 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ANDL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -3187,9 +3107,7 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3212,9 +3130,7 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -3240,8 +3156,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { } x := v_1 v.reset(OpAMD64BTRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3318,9 +3233,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v 
*Value) bool { v.reset(OpAMD64ANDQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -3461,8 +3374,7 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool { v.reset(OpAMD64ANDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3484,8 +3396,7 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool { v.reset(OpAMD64ANDQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3515,9 +3426,7 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { v.reset(OpAMD64ANDQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -3540,9 +3449,7 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { v.reset(OpAMD64ANDQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -3561,10 +3468,9 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ANDQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -3592,9 +3498,7 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3617,9 +3521,7 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) 
bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -3738,8 +3640,7 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool { v.reset(OpAMD64BTCLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3761,8 +3662,7 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool { v.reset(OpAMD64BTCLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3790,9 +3690,7 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool { v.reset(OpAMD64BTCLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3815,9 +3713,7 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool { v.reset(OpAMD64BTCLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -3887,8 +3783,7 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool { v.reset(OpAMD64BTCQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -3910,8 +3805,7 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool { v.reset(OpAMD64BTCQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -3939,9 +3833,7 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) 
bool { v.reset(OpAMD64BTCQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -3964,9 +3856,7 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool { v.reset(OpAMD64BTCQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4022,8 +3912,7 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { y := s.Args[1] x := s.Args[0] v.reset(OpAMD64BTQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (BTLconst [c] (SHRLconst [d] x)) @@ -4075,8 +3964,7 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { y := s.Args[1] x := s.Args[0] v.reset(OpAMD64BTL) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -4132,8 +4020,7 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool { y := s.Args[1] x := s.Args[0] v.reset(OpAMD64BTQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -4229,8 +4116,7 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool { v.reset(OpAMD64BTRLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4252,8 +4138,7 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool { v.reset(OpAMD64BTRLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4281,9 +4166,7 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool { v.reset(OpAMD64BTRLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} 
base) val mem) @@ -4306,9 +4189,7 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool { v.reset(OpAMD64BTRLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4404,8 +4285,7 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool { v.reset(OpAMD64BTRQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4427,8 +4307,7 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool { v.reset(OpAMD64BTRQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4456,9 +4335,7 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool { v.reset(OpAMD64BTRQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4481,9 +4358,7 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool { v.reset(OpAMD64BTRQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4579,8 +4454,7 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool { v.reset(OpAMD64BTSLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4602,8 +4476,7 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool { v.reset(OpAMD64BTSLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } 
return false @@ -4631,9 +4504,7 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool { v.reset(OpAMD64BTSLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4656,9 +4527,7 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool { v.reset(OpAMD64BTSLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4754,8 +4623,7 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool { v.reset(OpAMD64BTSQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -4777,8 +4645,7 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool { v.reset(OpAMD64BTSQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -4806,9 +4673,7 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool { v.reset(OpAMD64BTSQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -4831,9 +4696,7 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool { v.reset(OpAMD64BTSQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -4852,9 +4715,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLLS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLCC _ x (FlagEQ)) @@ -4933,9 +4794,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLHI) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLCS y _ (FlagEQ)) @@ -5014,9 +4873,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLEQ _ x (FlagEQ)) @@ -5095,9 +4952,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLGE _ x (FlagEQ)) @@ -5176,9 +5031,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLGT y _ (FlagEQ)) @@ -5257,9 +5110,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLCS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLHI y _ (FlagEQ)) @@ -5338,9 +5189,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLLE _ x (FlagEQ)) @@ -5419,9 +5268,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLCC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLLS _ x (FlagEQ)) @@ -5500,9 +5347,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVLGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLLT y _ (FlagEQ)) @@ -5581,9 +5426,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { } cond := v_2.Args[0] 
v.reset(OpAMD64CMOVLNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVLNE y _ (FlagEQ)) @@ -5662,9 +5505,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQLS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQCC _ x (FlagEQ)) @@ -5743,9 +5584,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQHI) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQCS y _ (FlagEQ)) @@ -5824,9 +5663,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQEQ _ x (FlagEQ)) @@ -5930,9 +5767,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQGE _ x (FlagEQ)) @@ -6011,9 +5846,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQGT y _ (FlagEQ)) @@ -6092,9 +5925,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQCS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQHI y _ (FlagEQ)) @@ -6173,9 +6004,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQLE _ x (FlagEQ)) @@ -6254,9 +6083,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQCC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, 
cond) return true } // match: (CMOVQLS _ x (FlagEQ)) @@ -6335,9 +6162,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQLT y _ (FlagEQ)) @@ -6416,9 +6241,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVQNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVQNE y _ (FlagEQ)) @@ -6497,9 +6320,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWLS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWCC _ x (FlagEQ)) @@ -6578,9 +6399,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWHI) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWCS y _ (FlagEQ)) @@ -6659,9 +6478,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWEQ _ x (FlagEQ)) @@ -6740,9 +6557,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWGE _ x (FlagEQ)) @@ -6821,9 +6636,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWGT y _ (FlagEQ)) @@ -6902,9 +6715,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWCS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWHI y _ (FlagEQ)) @@ -6983,9 +6794,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWLE _ x (FlagEQ)) @@ -7064,9 +6873,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWCC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWLS _ x (FlagEQ)) @@ -7145,9 +6952,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWLT y _ (FlagEQ)) @@ -7226,9 +7031,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { } cond := v_2.Args[0] v.reset(OpAMD64CMOVWNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cond) + v.AddArg3(x, y, cond) return true } // match: (CMOVWNE y _ (FlagEQ)) @@ -7336,8 +7139,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7360,9 +7162,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { v.reset(OpAMD64CMPBload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) @@ -7385,9 +7185,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -7495,8 +7293,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPBconst (ANDLconst [c] x) [0]) @@ -7520,8 +7317,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v 
*Value) bool { } x := v_0 v.reset(OpAMD64TESTB) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) @@ -7546,8 +7342,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -7573,8 +7368,7 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { v.reset(OpAMD64CMPBconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -7596,8 +7390,7 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { v.reset(OpAMD64CMPBconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -7625,9 +7418,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v.reset(OpAMD64CMPBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -7650,9 +7441,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v.reset(OpAMD64CMPBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) @@ -7673,8 +7462,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v.reset(OpAMD64CMPBconstload) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7722,8 +7510,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) 
+ v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7746,9 +7533,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { v.reset(OpAMD64CMPLload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) @@ -7771,9 +7556,7 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -7896,8 +7679,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPLconst (ANDLconst [c] x) [0]) @@ -7921,8 +7703,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTL) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) @@ -7947,8 +7728,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -7974,8 +7754,7 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { v.reset(OpAMD64CMPLconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -7997,8 +7776,7 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { v.reset(OpAMD64CMPLconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8026,9 +7804,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v.reset(OpAMD64CMPLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -8051,9 +7827,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v.reset(OpAMD64CMPLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) @@ -8074,8 +7848,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v.reset(OpAMD64CMPLconstload) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8131,8 +7904,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -8245,9 +8017,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { v.reset(OpAMD64CMPQload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem)) @@ -8270,9 +8040,7 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -8477,8 +8245,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPQconst (ANDQconst [c] x) [0]) @@ -8502,8 +8269,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTQ) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c]) @@ -8528,8 +8294,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { 
v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -8555,8 +8320,7 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool { v.reset(OpAMD64CMPQconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -8578,8 +8342,7 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool { v.reset(OpAMD64CMPQconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8607,9 +8370,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v.reset(OpAMD64CMPQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -8632,9 +8393,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v.reset(OpAMD64CMPQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) @@ -8655,8 +8414,7 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v.reset(OpAMD64CMPQconstload) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8704,8 +8462,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -8728,9 +8485,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { v.reset(OpAMD64CMPWload) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } 
// match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) @@ -8753,9 +8508,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(x) - v0.AddArg(mem) + v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true } @@ -8863,8 +8616,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64TESTW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWconst (ANDLconst [c] x) [0]) @@ -8888,8 +8640,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { } x := v_0 v.reset(OpAMD64TESTW) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) @@ -8914,8 +8665,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -8941,8 +8691,7 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool { v.reset(OpAMD64CMPWconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -8964,8 +8713,7 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool { v.reset(OpAMD64CMPWconstload) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -8993,9 +8741,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v.reset(OpAMD64CMPWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -9018,9 +8764,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v.reset(OpAMD64CMPWload) v.AuxInt = off1 
+ off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) @@ -9041,8 +8785,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v.reset(OpAMD64CMPWconstload) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -9072,10 +8815,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool { v.reset(OpAMD64CMPXCHGLlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } return false @@ -9105,10 +8845,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool { v.reset(OpAMD64CMPXCHGQlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } return false @@ -9135,9 +8872,7 @@ func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool { v.reset(OpAMD64DIVSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -9165,9 +8900,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool { v.reset(OpAMD64DIVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -9190,9 +8923,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool { v.reset(OpAMD64DIVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -9219,9 +8950,7 @@ func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool { v.reset(OpAMD64DIVSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ 
-9249,9 +8978,7 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool { v.reset(OpAMD64DIVSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -9274,9 +9001,7 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool { v.reset(OpAMD64DIVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } return false @@ -9294,8 +9019,7 @@ func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool { break } v.reset(OpAMD64HMULL) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9313,8 +9037,7 @@ func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool { break } v.reset(OpAMD64HMULLU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9332,8 +9055,7 @@ func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool { break } v.reset(OpAMD64HMULQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9351,8 +9073,7 @@ func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool { break } v.reset(OpAMD64HMULQU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -9400,8 +9121,7 @@ func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9430,8 +9150,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9450,8 +9169,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9470,8 +9188,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, 
y) return true } break @@ -9490,8 +9207,7 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9519,8 +9235,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) @@ -9541,8 +9256,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL2) v.AuxInt = c + 2*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) @@ -9558,8 +9272,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) @@ -9575,8 +9288,7 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9602,8 +9314,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) @@ -9624,8 +9335,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v.reset(OpAMD64LEAL4) v.AuxInt = c + 4*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) @@ -9641,8 +9351,7 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9668,8 +9377,7 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) @@ -9690,8 +9398,7 @@ 
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { v.reset(OpAMD64LEAL8) v.AuxInt = c + 8*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9739,8 +9446,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9785,8 +9491,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) @@ -9808,8 +9513,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) @@ -9831,8 +9535,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) @@ -9854,8 +9557,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9882,8 +9584,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9902,8 +9603,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9922,8 +9622,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9942,8 +9641,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v 
*Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9968,8 +9666,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.reset(OpAMD64LEAQ1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9987,8 +9684,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { break } v.reset(OpAMD64ADDQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10014,8 +9710,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) @@ -10036,8 +9731,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = c + 2*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) @@ -10053,8 +9747,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) @@ -10070,8 +9763,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -10093,8 +9785,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v.reset(OpAMD64LEAQ2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10120,8 +9811,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) @@ -10142,8 +9832,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ4) 
v.AuxInt = c + 4*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) @@ -10159,8 +9848,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -10182,8 +9870,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v.reset(OpAMD64LEAQ4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10209,8 +9896,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) @@ -10231,8 +9917,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = c + 8*d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -10254,8 +9939,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v.reset(OpAMD64LEAQ8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10284,8 +9968,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) @@ -10309,8 +9992,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) @@ -10334,8 +10016,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX 
x:(MOVQload [off] {sym} ptr mem)) @@ -10359,8 +10040,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQSX (ANDLconst [c] x)) @@ -10437,8 +10117,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { v.reset(OpAMD64MOVBQSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -10467,8 +10146,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) @@ -10492,8 +10170,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) @@ -10517,8 +10194,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) @@ -10542,8 +10218,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBQZX x) @@ -10581,9 +10256,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVBQZX (ANDLconst [c] x)) @@ -10633,8 +10306,7 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { v.reset(OpAMD64MOVBatomicload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) 
mem) @@ -10656,8 +10328,7 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { v.reset(OpAMD64MOVBatomicload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10705,8 +10376,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -10728,8 +10398,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -10752,9 +10421,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) @@ -10779,9 +10446,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10805,8 +10470,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -10827,8 +10491,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} (SB) _) @@ -10870,9 +10533,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { 
v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10897,9 +10558,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10923,8 +10582,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.reset(OpAMD64MOVBload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -10956,9 +10614,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) @@ -10980,9 +10636,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) @@ -11004,9 +10658,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) @@ -11028,9 +10680,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) @@ -11052,9 +10702,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) @@ -11076,9 +10724,7 @@ func 
rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) @@ -11100,9 +10746,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) @@ -11124,9 +10768,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) @@ -11148,9 +10790,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) @@ -11172,9 +10812,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) @@ -11191,9 +10829,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) @@ -11210,9 +10846,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) @@ -11234,9 +10868,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { 
v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -11257,8 +10889,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -11279,8 +10910,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -11303,9 +10933,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -11329,10 +10957,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) @@ -11358,10 +10983,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11389,12 +11011,10 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) v0.AuxInt = 8 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, 
v0, mem) return true } // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) @@ -11444,11 +11064,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) @@ -11546,11 +11164,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 7 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -11575,9 +11191,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -11602,9 +11216,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -11629,9 +11241,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) 
return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem)) @@ -11657,9 +11267,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem)) @@ -11685,9 +11293,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem)) @@ -11713,9 +11319,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) @@ -11745,9 +11349,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) @@ -11777,9 +11379,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) @@ -11816,14 +11416,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = j - 1 v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(p2, mem) + v.AddArg3(p, v0, 
mem) return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -11846,9 +11443,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -11870,9 +11465,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -11898,8 +11491,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -11921,8 +11513,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -11945,9 +11536,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -11964,9 +11553,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) @@ -11991,8 +11578,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { 
v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) @@ -12017,8 +11603,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -12040,8 +11625,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -12062,8 +11646,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v.reset(OpAMD64MOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -12092,9 +11675,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12119,9 +11700,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12153,9 +11732,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return 
true } } @@ -12190,10 +11767,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12219,10 +11793,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12255,13 +11826,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) v0.AuxInt = 8 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, idx, v0, mem) return true } } @@ -12325,12 +11893,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 3 v.Aux = s - v.AddArg(p) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, idx, v0, mem) return true } } @@ -12456,12 +12021,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 7 v.Aux = s - v.AddArg(p) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, idx, v0, mem) return true } } @@ -12500,10 +12062,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12536,10 +12095,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12572,10 +12128,7 @@ 
func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12613,10 +12166,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -12654,10 +12204,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -12683,9 +12230,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -12709,9 +12254,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -12740,8 +12283,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) @@ -12765,8 +12307,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQSX (ANDLconst [c] x)) @@ -12865,8 +12406,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { v.reset(OpAMD64MOVLQSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -12895,8 +12435,7 @@ func 
rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) @@ -12920,8 +12459,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVLQZX x) @@ -12959,9 +12497,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) @@ -12986,9 +12522,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVLQZX (ANDLconst [c] x)) @@ -13060,8 +12594,7 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { v.reset(OpAMD64MOVLatomicload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) @@ -13083,8 +12616,7 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { v.reset(OpAMD64MOVLatomicload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -13188,8 +12720,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -13211,8 +12742,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // 
match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -13235,9 +12765,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) @@ -13260,9 +12788,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) @@ -13285,9 +12811,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) @@ -13312,9 +12836,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13338,8 +12860,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -13360,8 +12881,7 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) @@ -13416,9 +12936,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + 
v.AddArg3(ptr, idx, mem) return true } break @@ -13438,9 +12956,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13465,9 +12981,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13492,9 +13006,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13518,8 +13030,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -13549,9 +13060,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -13573,9 +13082,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem) @@ -13596,8 +13103,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -13625,9 +13131,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: 
(MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -13649,9 +13153,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool { v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem) @@ -13672,8 +13174,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8(v *Value) bool { v.reset(OpAMD64MOVLload) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -13698,9 +13199,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) @@ -13717,9 +13216,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) @@ -13741,9 +13238,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -13764,8 +13259,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -13786,8 +13280,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) 
val mem) @@ -13810,9 +13303,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -13836,10 +13327,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) @@ -13863,10 +13351,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) @@ -13890,10 +13375,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) @@ -13919,10 +13401,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -13949,9 +13428,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) @@ -13981,9 +13458,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v 
*Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) @@ -14020,14 +13495,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = j - 4 v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(p2, mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -14050,9 +13522,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -14074,9 +13544,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) @@ -14098,9 +13566,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) @@ -14122,9 +13588,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) @@ -14146,9 +13610,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) 
bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) @@ -14170,9 +13632,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14205,9 +13665,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ADDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14235,9 +13693,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64SUBLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14270,9 +13726,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ANDLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14307,9 +13761,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14344,9 +13796,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -14374,9 +13824,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTCLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr 
y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14402,9 +13850,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTRLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem) @@ -14430,9 +13876,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTSLmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14459,8 +13903,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ADDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14487,8 +13930,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ANDLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14515,8 +13957,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64ORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14543,8 +13984,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64XORLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14571,8 +14011,7 @@ func 
rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTCLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14599,8 +14038,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTRLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) @@ -14627,8 +14065,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64BTSLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) @@ -14645,9 +14082,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -14675,8 +14110,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -14698,8 +14132,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -14722,9 +14155,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, 
mem) return true } // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) @@ -14747,9 +14178,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -14766,9 +14195,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) @@ -14793,11 +14220,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) @@ -14822,11 +14247,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -14848,8 +14271,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -14870,8 +14292,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { 
v.reset(OpAMD64MOVLstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -14897,9 +14318,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14924,9 +14343,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14951,9 +14368,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14985,12 +14400,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(p, i, v0, mem) return true } } @@ -15023,9 +14435,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) @@ -15047,9 +14457,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) @@ -15075,15 
+14483,12 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) v0.AuxInt = 2 v0.AddArg(i) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(p, v0, v1, mem) return true } return false @@ -15109,10 +14514,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15133,10 +14535,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15162,10 +14561,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15191,10 +14587,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -15226,10 +14619,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -15267,10 +14657,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ 
-15296,9 +14683,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -15322,9 +14707,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -15355,10 +14738,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -15381,10 +14761,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) @@ -15410,13 +14787,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 2 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) @@ -15447,13 +14821,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 2 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, v0, w0, mem) return true } // match: (MOVLstoreidx4 [i] {s} p (MOVQconst 
[c]) w mem) @@ -15475,9 +14846,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVLstoreidx4 [off] {s} ptr idx (MOVQconst [c]) mem) @@ -15499,9 +14868,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -15531,10 +14898,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -15557,10 +14921,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool { v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem) @@ -15582,9 +14943,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -15610,8 +14969,7 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { v.reset(OpAMD64MOVOload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -15633,8 +14991,7 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { v.reset(OpAMD64MOVOload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -15665,9 +15022,7 @@ func 
rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { v.reset(OpAMD64MOVOstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -15690,9 +15045,7 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { v.reset(OpAMD64MOVOstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) @@ -15719,19 +15072,15 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = dstOff + 8 v.Aux = dstSym - v.AddArg(ptr) v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder)) - v.AddArg(v0) v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem) v1.AuxInt = dstOff v1.Aux = dstSym - v1.AddArg(ptr) v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) v2.AuxInt = int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder)) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } return false @@ -15757,8 +15106,7 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { v.reset(OpAMD64MOVQatomicload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) @@ -15780,8 +15128,7 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { v.reset(OpAMD64MOVQatomicload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -15886,8 +15233,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -15909,8 +15255,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -15933,9 +15278,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) @@ -15958,9 +15301,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) @@ -15985,9 +15326,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16011,8 +15350,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -16033,8 +15371,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) @@ -16089,9 +15426,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16116,9 +15451,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16143,9 +15476,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -16169,8 +15500,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -16200,9 +15530,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -16224,9 +15552,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem) @@ -16247,8 +15573,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { v.reset(OpAMD64MOVQload) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -16276,9 +15601,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -16299,8 +15622,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) 
- v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -16323,9 +15645,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -16349,10 +15669,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) @@ -16376,10 +15693,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) @@ -16405,10 +15719,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -16433,9 +15744,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -16457,9 +15766,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) 
mem) @@ -16481,9 +15788,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) @@ -16505,9 +15810,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) @@ -16529,9 +15832,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) @@ -16553,9 +15854,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16588,9 +15887,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ADDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16618,9 +15915,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64SUBQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16653,9 +15948,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ANDQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16690,9 +15983,7 @@ func 
rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16727,9 +16018,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } break @@ -16757,9 +16046,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTCQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16785,9 +16072,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTRQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem) @@ -16813,9 +16098,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTSQmodify) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16842,8 +16125,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ADDQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16870,8 +16152,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ANDQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16898,8 
+16179,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64ORQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16926,8 +16206,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64XORQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16954,8 +16233,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTCQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -16982,8 +16260,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTRQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) @@ -17010,8 +16287,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64BTSQconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) @@ -17028,9 +16304,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -17058,8 +16332,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, 
mem) return true } // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -17081,8 +16354,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -17105,9 +16377,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) @@ -17130,9 +16400,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -17149,9 +16417,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) @@ -17176,11 +16442,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVOstore) v.AuxInt = ValAndOff(c2).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -17202,8 +16466,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) 
return true } // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -17224,8 +16487,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { v.reset(OpAMD64MOVQstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -17249,9 +16511,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17276,9 +16536,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17303,9 +16561,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17335,9 +16591,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) @@ -17359,9 +16613,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(8 * c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -17387,10 +16639,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -17416,10 +16665,7 @@ func 
rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -17445,10 +16691,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -17473,9 +16716,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -17499,9 +16740,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = makeValAndOff(c, off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -17531,10 +16770,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -17557,10 +16793,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem) @@ -17582,9 +16815,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVQstoreidx8 [off] {s} ptr idx (MOVQconst [c]) mem) @@ -17606,9 +16837,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { 
v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = makeValAndOff(c, off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -17634,8 +16863,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -17657,8 +16885,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -17681,9 +16908,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) @@ -17706,9 +16931,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) @@ -17733,9 +16956,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -17778,9 +16999,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) @@ -17802,9 +17021,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { 
v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -17826,9 +17043,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem) @@ -17849,8 +17064,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -17878,9 +17092,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -17902,9 +17114,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem) @@ -17925,8 +17135,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { v.reset(OpAMD64MOVSDload) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -17954,9 +17163,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -17979,9 +17186,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - 
v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -18005,10 +17210,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) @@ -18032,10 +17234,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) @@ -18061,10 +17260,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -18083,9 +17279,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v.reset(OpAMD64MOVQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -18110,10 +17304,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) @@ -18136,10 +17327,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ 
-18162,10 +17350,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem) @@ -18187,9 +17372,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18219,10 +17402,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18245,10 +17425,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + 8*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem) @@ -18270,9 +17447,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { v.reset(OpAMD64MOVSDstore) v.AuxInt = i + 8*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18298,8 +17473,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -18321,8 +17495,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) 
mem) @@ -18345,9 +17518,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) @@ -18370,9 +17541,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) @@ -18397,9 +17566,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -18442,9 +17609,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) @@ -18466,9 +17631,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -18490,9 +17653,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem) @@ -18513,8 +17674,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -18542,9 +17702,7 @@ func 
rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -18566,9 +17724,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem) @@ -18589,8 +17745,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { v.reset(OpAMD64MOVSSload) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -18618,9 +17773,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -18643,9 +17796,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -18669,10 +17820,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) @@ -18696,10 +17844,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + 
v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) @@ -18725,10 +17870,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -18747,9 +17889,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -18774,10 +17914,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) @@ -18800,10 +17937,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18826,10 +17960,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx1 [i] {s} p (MOVQconst [c]) w mem) @@ -18851,9 +17982,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18883,10 +18012,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - 
v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -18909,10 +18035,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + 4*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem) @@ -18934,9 +18057,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { v.reset(OpAMD64MOVSSstore) v.AuxInt = i + 4*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -18965,8 +18086,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) @@ -18990,8 +18110,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) @@ -19015,8 +18134,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQSX (ANDLconst [c] x)) @@ -19104,8 +18222,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { v.reset(OpAMD64MOVWQSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -19134,8 +18251,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) @@ -19159,8 +18275,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = 
off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) @@ -19184,8 +18299,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVWQZX x) @@ -19223,9 +18337,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) @@ -19250,9 +18362,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (MOVWQZX (ANDLconst [c] x)) @@ -19337,8 +18447,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -19360,8 +18469,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) @@ -19384,9 +18492,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) @@ -19409,9 +18515,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, 
idx, mem) return true } // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) @@ -19436,9 +18540,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19462,8 +18564,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -19484,8 +18585,7 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} (SB) _) @@ -19522,9 +18622,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19549,9 +18647,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19576,9 +18672,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -19602,8 +18696,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } break @@ -19633,9 +18726,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: 
(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) @@ -19657,9 +18748,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + 2*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem) @@ -19680,8 +18769,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { v.reset(OpAMD64MOVWload) v.AuxInt = i + 2*c v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -19706,9 +18794,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) @@ -19725,9 +18811,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) @@ -19749,9 +18833,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) @@ -19772,8 +18854,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) @@ -19794,8 +18875,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) 
val mem) @@ -19818,9 +18898,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) @@ -19844,10 +18922,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) @@ -19871,10 +18946,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) @@ -19900,10 +18972,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -19930,9 +18999,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) @@ -19957,9 +19024,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) @@ -19989,9 +19054,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt 
= i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) @@ -20021,9 +19084,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) @@ -20060,14 +19121,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVLstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = j - 2 v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(p2, mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) @@ -20090,9 +19148,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -20114,9 +19170,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -20142,8 +19196,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) @@ -20165,8 +19218,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = 
mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) @@ -20189,9 +19241,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) @@ -20214,9 +19264,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) @@ -20233,9 +19281,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = x v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) @@ -20260,8 +19306,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) @@ -20286,8 +19331,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -20309,8 +19353,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) 
v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -20331,8 +19374,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v.reset(OpAMD64MOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -20356,9 +19398,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -20383,9 +19423,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -20410,9 +19448,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -20444,9 +19480,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) + v.AddArg3(p, i, mem) return true } } @@ -20478,9 +19512,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) @@ -20502,9 +19534,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym - v.AddArg(ptr) 
- v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) @@ -20530,12 +19560,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) v0.AuxInt = 1 v0.AddArg(i) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -20561,10 +19589,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -20590,10 +19615,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -20619,10 +19641,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -20654,10 +19673,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -20690,10 +19706,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -20731,10 +19744,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - 
v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -20772,10 +19782,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -20801,9 +19808,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i + c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } break @@ -20827,9 +19832,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -20860,10 +19863,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) @@ -20886,10 +19886,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + 2*d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) @@ -20915,13 +19912,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) @@ -20947,13 +19941,10 @@ func 
rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, v0, w, mem) return true } // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) @@ -20984,13 +19975,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) v0.AuxInt = 1 v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, v0, w0, mem) return true } // match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem) @@ -21012,9 +20000,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstore) v.AuxInt = i + 2*c v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstoreidx2 [off] {s} ptr idx (MOVLconst [c]) mem) @@ -21036,9 +20022,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -21090,8 +20074,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21104,8 +20087,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21118,8 +20100,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - 
v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21164,8 +20145,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [ 5] x) @@ -21176,8 +20156,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [ 7] x) @@ -21188,11 +20167,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [ 9] x) @@ -21203,8 +20180,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULLconst [11] x) @@ -21215,11 +20191,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [13] x) @@ -21230,11 +20204,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [19] x) @@ -21245,11 +20217,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [21] x) @@ -21260,11 +20230,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 
v.reset(OpAMD64LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [25] x) @@ -21275,11 +20243,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [27] x) @@ -21291,13 +20257,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [37] x) @@ -21308,11 +20271,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [41] x) @@ -21323,11 +20284,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [45] x) @@ -21339,13 +20298,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [73] x) @@ -21356,11 +20312,9 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { } x 
:= v_0 v.reset(OpAMD64LEAL8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULLconst [81] x) @@ -21372,13 +20326,10 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULLconst [c] x) @@ -21394,8 +20345,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21411,8 +20361,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21428,8 +20377,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 2) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21445,8 +20393,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 4) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21462,8 +20409,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v0.AuxInt = log2(c - 8) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) @@ -21478,8 +20424,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { 
v.reset(OpAMD64SHLLconst) v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21495,8 +20440,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v.reset(OpAMD64SHLLconst) v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21512,8 +20456,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v.reset(OpAMD64SHLLconst) v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21586,8 +20529,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21600,8 +20542,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21614,8 +20555,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21660,8 +20600,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULQconst [ 5] x) @@ -21672,8 +20611,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULQconst [ 7] x) @@ -21684,11 +20622,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - 
v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [ 9] x) @@ -21699,8 +20635,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } // match: (MULQconst [11] x) @@ -21711,11 +20646,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [13] x) @@ -21726,11 +20659,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [19] x) @@ -21741,11 +20672,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ2) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [21] x) @@ -21756,11 +20685,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [25] x) @@ -21771,11 +20698,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [27] x) @@ -21787,13 +20712,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - 
v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULQconst [37] x) @@ -21804,11 +20726,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ4) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [41] x) @@ -21819,11 +20739,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [45] x) @@ -21835,13 +20753,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULQconst [73] x) @@ -21852,11 +20767,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { } x := v_0 v.reset(OpAMD64LEAQ8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(x, v0) return true } // match: (MULQconst [81] x) @@ -21868,13 +20781,10 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v1.AddArg(x) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(x, x) + v.AddArg2(v0, v1) return true } // match: (MULQconst [c] x) @@ -21890,8 +20800,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21907,8 +20816,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21924,8 +20832,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 2) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21941,8 +20848,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 4) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21958,8 +20864,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v0.AuxInt = log2(c - 8) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) @@ -21974,8 +20879,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v.reset(OpAMD64SHLQconst) v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -21991,8 +20895,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v.reset(OpAMD64SHLQconst) v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -22008,8 +20911,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { v.reset(OpAMD64SHLQconst) v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -22067,9 +20969,7 @@ func 
rewriteValueAMD64_OpAMD64MULSD(v *Value) bool { v.reset(OpAMD64MULSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -22101,9 +21001,7 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { v.reset(OpAMD64MULSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -22126,9 +21024,7 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { v.reset(OpAMD64MULSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) @@ -22147,10 +21043,9 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64MULSD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -22178,9 +21073,7 @@ func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { v.reset(OpAMD64MULSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -22212,9 +21105,7 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { v.reset(OpAMD64MULSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -22237,9 +21128,7 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { v.reset(OpAMD64MULSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) @@ -22258,10 +21147,9 @@ func rewriteValueAMD64_OpAMD64MULSSload(v 
*Value) bool { } y := v_2.Args[1] v.reset(OpAMD64MULSS) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -22294,8 +21182,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { break } v.reset(OpAMD64SUBL) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (NEGL (MOVLconst [c])) @@ -22339,8 +21226,7 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { break } v.reset(OpAMD64SUBQ) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (NEGQ (MOVQconst [c])) @@ -22426,8 +21312,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } x := v_1 v.reset(OpAMD64BTSL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22586,8 +21471,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22637,8 +21521,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22688,8 +21571,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22739,8 +21621,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22804,8 +21685,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22869,8 +21749,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -22911,8 +21790,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22952,8 +21830,7 @@ func 
rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -23016,8 +21893,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -23081,8 +21957,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -23123,8 +21998,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -23164,8 +22038,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -23217,8 +22090,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -23258,8 +22130,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -23320,11 +22191,9 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23372,9 +22241,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -23423,9 +22290,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -23495,12 +22360,9 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v2 := 
b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23545,8 +22407,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -23596,8 +22457,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -23661,12 +22521,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23716,9 +22574,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -23777,9 +22633,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -23852,13 +22706,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -23886,9 +22737,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v.reset(OpAMD64ORLload) v.AuxInt = off v.Aux = sym - 
v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -24000,8 +22849,7 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { v.reset(OpAMD64ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -24023,8 +22871,7 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { v.reset(OpAMD64ORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -24054,9 +22901,7 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { v.reset(OpAMD64ORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -24079,9 +22924,7 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { v.reset(OpAMD64ORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -24100,10 +22943,9 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ORL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -24131,9 +22973,7 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -24156,9 +22996,7 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { v.reset(OpAMD64ORLmodify) v.AuxInt = off1 + off2 v.Aux = 
mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -24182,8 +23020,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } x := v_1 v.reset(OpAMD64BTSQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -24312,8 +23149,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24363,8 +23199,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24414,8 +23249,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24465,8 +23299,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -24537,8 +23370,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -24578,8 +23410,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -24619,8 +23450,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -24681,11 +23511,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -24747,11 +23575,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - 
v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -24799,9 +23625,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -24850,9 +23674,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -24901,9 +23723,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -24973,12 +23793,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25049,12 +23866,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25099,8 +23913,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -25150,8 +23963,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -25201,8 +24013,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) v1.AuxInt = i0 v1.Aux = s - 
v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -25266,12 +24077,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25342,12 +24151,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25397,9 +24204,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -25458,9 +24263,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -25519,9 +24322,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -25594,13 +24395,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25680,13 +24478,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - 
v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -25714,9 +24509,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v.reset(OpAMD64ORQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -25824,8 +24617,7 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { v.reset(OpAMD64ORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -25847,8 +24639,7 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { v.reset(OpAMD64ORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -25878,9 +24669,7 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { v.reset(OpAMD64ORQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -25903,9 +24692,7 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { v.reset(OpAMD64ORQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -25924,10 +24711,9 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64ORQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -25955,9 +24741,7 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -25980,9 +24764,7 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { v.reset(OpAMD64ORQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -25999,8 +24781,7 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLB x (NEGL y)) @@ -26012,8 +24793,7 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLB x (MOVQconst [c])) @@ -26086,8 +24866,7 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLL x (NEGL y)) @@ -26099,8 +24878,7 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLL x (MOVQconst [c])) @@ -26173,8 +24951,7 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLQ x (NEGL y)) @@ -26186,8 +24963,7 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLQ x (MOVQconst [c])) @@ -26260,8 +25036,7 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ROLW x (NEGL y)) @@ -26273,8 +25048,7 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } // match: (ROLW x (MOVQconst [c])) @@ -26347,8 +25121,7 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORB x (NEGL y)) @@ -26360,8 +25133,7 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORB x (MOVQconst [c])) @@ -26404,8 +25176,7 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORL x (NEGL y)) @@ -26417,8 +25188,7 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORL x (MOVQconst [c])) @@ -26461,8 +25231,7 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORQ x (NEGL y)) @@ -26474,8 +25243,7 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORQ x (MOVQconst [c])) @@ -26518,8 +25286,7 @@ func rewriteValueAMD64_OpAMD64RORW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORW x (NEGL y)) @@ -26531,8 +25298,7 @@ func rewriteValueAMD64_OpAMD64RORW(v *Value) bool { } y := v_1.Args[0] v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RORW x (MOVQconst [c])) @@ -26666,8 +25432,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGQ (ADDQconst [c] y))) @@ -26689,10 +25454,9 @@ func rewriteValueAMD64_OpAMD64SARL(v 
*Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARL x (ANDQconst [c] y)) @@ -26709,8 +25473,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGQ (ANDQconst [c] y))) @@ -26732,10 +25495,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARL x (ADDLconst [c] y)) @@ -26752,8 +25514,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGL (ADDLconst [c] y))) @@ -26775,10 +25536,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARL x (ANDLconst [c] y)) @@ -26795,8 +25555,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARL x (NEGL (ANDLconst [c] y))) @@ -26818,10 +25577,9 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -26898,8 +25656,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGQ (ADDQconst [c] y))) @@ -26921,10 +25678,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) 
return true } // match: (SARQ x (ANDQconst [c] y)) @@ -26941,8 +25697,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGQ (ANDQconst [c] y))) @@ -26964,10 +25719,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARQ x (ADDLconst [c] y)) @@ -26984,8 +25738,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGL (ADDLconst [c] y))) @@ -27007,10 +25760,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SARQ x (ANDLconst [c] y)) @@ -27027,8 +25779,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SARQ x (NEGL (ANDLconst [c] y))) @@ -27050,10 +25801,9 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -27218,8 +25968,7 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool { } v.reset(OpAMD64SBBQconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(borrow) + v.AddArg2(x, borrow) return true } // match: (SBBQ x y (FlagEQ)) @@ -27231,8 +25980,7 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool { break } v.reset(OpAMD64SUBQborrow) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -27515,9 +26263,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - 
v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -27539,9 +26285,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -27564,9 +26308,7 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem) @@ -27582,11 +26324,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -27602,11 +26342,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -27622,11 +26360,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -27642,11 +26378,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - 
v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -27662,11 +26396,9 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -27691,9 +26423,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -27715,9 +26445,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -27740,9 +26468,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagEQ) mem) @@ -27758,11 +26484,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -27778,11 +26502,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr 
(FlagLT_UGT) mem) @@ -27798,11 +26520,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -27818,11 +26538,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -27838,11 +26556,9 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -28053,9 +26769,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -28077,9 +26791,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -28102,9 +26814,7 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64SETBEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem) @@ -28120,11 +26830,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { 
v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -28140,11 +26848,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -28160,11 +26866,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -28180,11 +26884,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -28200,11 +26902,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -28229,9 +26929,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64SETAstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -28253,9 +26951,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { 
v.reset(OpAMD64SETBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -28278,9 +26974,7 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagEQ) mem) @@ -28296,11 +26990,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -28316,11 +27008,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -28336,11 +27026,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -28356,11 +27044,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -28376,11 +27062,9 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { 
v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -28409,8 +27093,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -28437,8 +27120,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -28829,12 +27511,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -28865,12 +27544,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -28894,12 +27570,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem) @@ -28921,12 +27595,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - 
v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) @@ -28955,12 +27627,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -28982,12 +27652,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) @@ -29007,12 +27675,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) @@ -29046,12 +27712,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29087,12 +27751,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29128,12 +27790,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { 
v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29169,12 +27829,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29206,12 +27864,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29243,12 +27899,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -29267,9 +27921,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29291,9 +27943,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29316,9 +27966,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + 
v.AddArg3(base, val, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem) @@ -29334,11 +27982,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -29354,11 +28000,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -29374,11 +28018,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -29394,11 +28036,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29414,11 +28054,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -29573,9 +28211,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return 
true } // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29597,9 +28233,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29622,9 +28256,7 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) @@ -29640,11 +28272,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -29660,11 +28290,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -29680,11 +28308,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -29700,11 +28326,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, 
mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29720,11 +28344,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -29749,9 +28371,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -29773,9 +28393,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -29798,9 +28416,7 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) @@ -29816,11 +28432,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -29836,11 +28450,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -29856,11 +28468,9 @@ func 
rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -29876,11 +28486,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29896,11 +28504,9 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -30055,9 +28661,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64SETGEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -30079,9 +28683,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -30104,9 +28706,7 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64SETLEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) @@ -30122,11 +28722,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - 
v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -30142,11 +28740,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -30162,11 +28758,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -30182,11 +28776,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -30202,11 +28794,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -30231,9 +28821,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64SETGstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -30255,9 +28843,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) 
- v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -30280,9 +28866,7 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64SETLstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) @@ -30298,11 +28882,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -30318,11 +28900,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -30338,11 +28918,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -30358,11 +28936,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -30378,11 +28954,9 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := 
b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -30411,8 +28985,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -30439,8 +29012,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { y := v_0_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -30831,12 +29403,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -30867,12 +29436,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) return true } break @@ -30896,12 +29462,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) @@ -30923,12 +29487,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: 
(SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) @@ -30957,12 +29519,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -30984,12 +29544,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) @@ -31009,12 +29567,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(s) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) @@ -31048,12 +29604,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31089,12 +29643,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31130,12 +29682,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, 
OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31171,12 +29721,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31208,12 +29756,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31245,12 +29791,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } break @@ -31269,9 +29813,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) @@ -31293,9 +29835,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -31318,9 +29858,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64SETNEstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) @@ -31336,11 
+29874,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) @@ -31356,11 +29892,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) @@ -31376,11 +29910,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) @@ -31396,11 +29928,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -31416,11 +29946,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -31469,8 +29997,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGQ (ADDQconst [c] y))) @@ -31492,10 +30019,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - 
v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLL x (ANDQconst [c] y)) @@ -31512,8 +30038,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGQ (ANDQconst [c] y))) @@ -31535,10 +30060,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLL x (ADDLconst [c] y)) @@ -31555,8 +30079,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGL (ADDLconst [c] y))) @@ -31578,10 +30101,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLL x (ANDLconst [c] y)) @@ -31598,8 +30120,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLL x (NEGL (ANDLconst [c] y))) @@ -31621,10 +30142,9 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -31713,8 +30233,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGQ (ADDQconst [c] y))) @@ -31736,10 +30255,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLQ x (ANDQconst [c] y)) 
@@ -31756,8 +30274,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGQ (ANDQconst [c] y))) @@ -31779,10 +30296,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLQ x (ADDLconst [c] y)) @@ -31799,8 +30315,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGL (ADDLconst [c] y))) @@ -31822,10 +30337,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHLQ x (ANDLconst [c] y)) @@ -31842,8 +30356,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHLQ x (NEGL (ANDLconst [c] y))) @@ -31865,10 +30378,9 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32054,8 +30566,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGQ (ADDQconst [c] y))) @@ -32077,10 +30588,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRL x (ANDQconst [c] y)) @@ -32097,8 +30607,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) 
return true } // match: (SHRL x (NEGQ (ANDQconst [c] y))) @@ -32120,10 +30629,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRL x (ADDLconst [c] y)) @@ -32140,8 +30648,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGL (ADDLconst [c] y))) @@ -32163,10 +30670,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRL x (ANDLconst [c] y)) @@ -32183,8 +30689,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRL x (NEGL (ANDLconst [c] y))) @@ -32206,10 +30711,9 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32286,8 +30790,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGQ (ADDQconst [c] y))) @@ -32309,10 +30812,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRQ x (ANDQconst [c] y)) @@ -32329,8 +30831,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGQ (ANDQconst [c] y))) @@ -32352,10 +30853,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } 
v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRQ x (ADDLconst [c] y)) @@ -32372,8 +30872,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGL (ADDLconst [c] y))) @@ -32395,10 +30894,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SHRQ x (ANDLconst [c] y)) @@ -32415,8 +30913,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SHRQ x (NEGL (ANDLconst [c] y))) @@ -32438,10 +30935,9 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32621,9 +31117,7 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { v.reset(OpAMD64SUBLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -32680,9 +31174,7 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { v.reset(OpAMD64SUBLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -32705,9 +31197,7 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { v.reset(OpAMD64SUBLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -32726,10 +31216,9 @@ func rewriteValueAMD64_OpAMD64SUBLload(v 
*Value) bool { } y := v_2.Args[1] v.reset(OpAMD64SUBL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -32757,9 +31246,7 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool { v.reset(OpAMD64SUBLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -32782,9 +31269,7 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool { v.reset(OpAMD64SUBLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -32859,9 +31344,7 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool { v.reset(OpAMD64SUBQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -32973,9 +31456,7 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { v.reset(OpAMD64SUBQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -32998,9 +31479,7 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { v.reset(OpAMD64SUBQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -33019,10 +31498,9 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64SUBQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -33050,9 +31528,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool { v.reset(OpAMD64SUBQmodify) v.AuxInt = 
off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -33075,9 +31551,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool { v.reset(OpAMD64SUBQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -33104,9 +31578,7 @@ func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool { v.reset(OpAMD64SUBSDload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -33136,9 +31608,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { v.reset(OpAMD64SUBSDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -33161,9 +31631,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { v.reset(OpAMD64SUBSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) @@ -33182,10 +31650,9 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64SUBSD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -33212,9 +31679,7 @@ func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool { v.reset(OpAMD64SUBSSload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -33244,9 +31709,7 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { v.reset(OpAMD64SUBSSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + 
v.AddArg3(val, base, mem) return true } // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -33269,9 +31732,7 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { v.reset(OpAMD64SUBSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) @@ -33290,10 +31751,9 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64SUBSS) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -33341,8 +31801,7 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33363,8 +31822,7 @@ func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool { break } v.reset(OpAMD64TESTB) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33412,8 +31870,7 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33444,8 +31901,7 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { break } v.reset(OpAMD64TESTL) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33497,8 +31953,7 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { v.AddArg(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33529,8 +31984,7 @@ func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool { break } v.reset(OpAMD64TESTQ) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33578,8 +32032,7 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { v.AddArg(v0) v0.AuxInt = 
makeValAndOff(0, off) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } break @@ -33600,8 +32053,7 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { break } v.reset(OpAMD64TESTW) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } return false @@ -33629,9 +32081,7 @@ func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v.reset(OpAMD64XADDLlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33659,9 +32109,7 @@ func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { v.reset(OpAMD64XADDQlock) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33689,9 +32137,7 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { v.reset(OpAMD64XCHGL) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) @@ -33714,9 +32160,7 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { v.reset(OpAMD64XCHGL) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33744,9 +32188,7 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { v.reset(OpAMD64XCHGQ) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) @@ -33769,9 +32211,7 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { v.reset(OpAMD64XCHGQ) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } return false @@ -33793,8 +32233,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { } x := v_1 v.reset(OpAMD64BTCL) - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -33940,9 +32379,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { v.reset(OpAMD64XORLload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -34152,8 +32589,7 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { v.reset(OpAMD64XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -34175,8 +32611,7 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { v.reset(OpAMD64XORLconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -34206,9 +32641,7 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { v.reset(OpAMD64XORLload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -34231,9 +32664,7 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { v.reset(OpAMD64XORLload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) @@ -34252,10 +32683,9 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { } y := v_2.Args[1] v.reset(OpAMD64XORL) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -34283,9 +32713,7 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (XORLmodify 
[off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -34308,9 +32736,7 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool { v.reset(OpAMD64XORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -34332,8 +32758,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } x := v_1 v.reset(OpAMD64BTCQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -34433,9 +32858,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { v.reset(OpAMD64XORQload) v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -34533,8 +32956,7 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool { v.reset(OpAMD64XORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) @@ -34556,8 +32978,7 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool { v.reset(OpAMD64XORQconstmodify) v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -34587,9 +33008,7 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { v.reset(OpAMD64XORQload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) @@ -34612,9 +33031,7 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { v.reset(OpAMD64XORQload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) - v.AddArg(base) - v.AddArg(mem) + v.AddArg3(val, base, mem) return true } // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) @@ -34633,10 +33050,9 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { } y 
:= v_2.Args[1] v.reset(OpAMD64XORQ) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -34664,9 +33080,7 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) @@ -34689,9 +33103,7 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { v.reset(OpAMD64XORQmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -34709,12 +33121,9 @@ func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64AddTupleFirst32) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } @@ -34731,12 +33140,9 @@ func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64AddTupleFirst64) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } @@ -34751,9 +33157,7 @@ func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64XCHGL) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } } @@ -34768,9 +33172,7 @@ func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { val := v_1 mem := v_2 v.reset(OpAMD64XCHGQ) - v.AddArg(val) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(val, ptr, mem) return true } } @@ -34788,9 +33190,7 @@ func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { 
mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34809,9 +33209,7 @@ func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34830,9 +33228,7 @@ func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34851,9 +33247,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg(val) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg3(val, ptr, mem) v.AddArg(v0) return true } @@ -34871,10 +33265,9 @@ func rewriteValueAMD64_OpBitLen16(v *Value) bool { v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) v2.AddArg(x) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -34893,10 +33286,9 @@ func rewriteValueAMD64_OpBitLen32(v *Value) bool { v1.AuxInt = 1 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) v3.AddArg(x) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) v.AddArg(v0) return true @@ -34918,15 +33310,13 @@ func rewriteValueAMD64_OpBitLen64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AddArg(x) v1.AddArg(v2) - v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) v3.AuxInt = -1 - 
v0.AddArg(v3) v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v5.AddArg(x) v4.AddArg(v5) - v0.AddArg(v4) + v0.AddArg3(v1, v3, v4) v.AddArg(v0) return true } @@ -34944,10 +33334,9 @@ func rewriteValueAMD64_OpBitLen8(v *Value) bool { v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) v2.AddArg(x) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -34985,9 +33374,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQEQ) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNE cond)) @@ -35005,9 +33392,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQNE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETL cond)) @@ -35025,9 +33410,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQLT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETG cond)) @@ -35045,9 +33428,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETLE cond)) @@ -35065,9 +33446,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQLE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGE cond)) @@ -35085,9 +33464,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETA cond)) @@ -35105,9 +33482,7 @@ func rewriteValueAMD64_OpCondSelect(v 
*Value) bool { break } v.reset(OpAMD64CMOVQHI) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETB cond)) @@ -35125,9 +33500,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQCS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETAE cond)) @@ -35145,9 +33518,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQCC) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETBE cond)) @@ -35165,9 +33536,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQLS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQF cond)) @@ -35185,9 +33554,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQEQF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNEF cond)) @@ -35205,9 +33572,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQNEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGF cond)) @@ -35225,9 +33590,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGTF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGEF cond)) @@ -35245,9 +33608,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQGEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQ cond)) @@ -35265,9 +33626,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLEQ) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // 
match: (CondSelect x y (SETNE cond)) @@ -35285,9 +33644,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLNE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETL cond)) @@ -35305,9 +33662,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLLT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETG cond)) @@ -35325,9 +33680,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETLE cond)) @@ -35345,9 +33698,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLLE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGE cond)) @@ -35365,9 +33716,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETA cond)) @@ -35385,9 +33734,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLHI) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETB cond)) @@ -35405,9 +33752,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLCS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETAE cond)) @@ -35425,9 +33770,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLCC) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETBE cond)) @@ -35445,9 +33788,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLLS) - 
v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQF cond)) @@ -35465,9 +33806,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLEQF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNEF cond)) @@ -35485,9 +33824,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLNEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGF cond)) @@ -35505,9 +33842,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGTF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGEF cond)) @@ -35525,9 +33860,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLGEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQ cond)) @@ -35545,9 +33878,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWEQ) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNE cond)) @@ -35565,9 +33896,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWNE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETL cond)) @@ -35585,9 +33914,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWLT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETG cond)) @@ -35605,9 +33932,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGT) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETLE cond)) @@ -35625,9 
+33950,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWLE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGE cond)) @@ -35645,9 +33968,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGE) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETA cond)) @@ -35665,9 +33986,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWHI) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETB cond)) @@ -35685,9 +34004,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWCS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETAE cond)) @@ -35705,9 +34022,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWCC) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETBE cond)) @@ -35725,9 +34040,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWLS) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETEQF cond)) @@ -35745,9 +34058,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWEQF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETNEF cond)) @@ -35765,9 +34076,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWNEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGF cond)) @@ -35785,9 +34094,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGTF) - v.AddArg(y) - v.AddArg(x) - 
v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y (SETGEF cond)) @@ -35805,9 +34112,7 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWGEF) - v.AddArg(y) - v.AddArg(x) - v.AddArg(cond) + v.AddArg3(y, x, cond) return true } // match: (CondSelect x y check) @@ -35823,11 +34128,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { } v.reset(OpCondSelect) v.Type = t - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } // match: (CondSelect x y check) @@ -35843,11 +34146,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { } v.reset(OpCondSelect) v.Type = t - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } // match: (CondSelect x y check) @@ -35863,11 +34164,9 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { } v.reset(OpCondSelect) v.Type = t - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } // match: (CondSelect x y check) @@ -35882,12 +34181,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVQNE) - v.AddArg(y) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(y, x, v0) return true } // match: (CondSelect x y check) @@ -35902,12 +34199,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVLNE) - v.AddArg(y) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(y, x, v0) return true } // match: (CondSelect x y check) @@ -35922,12 +34217,10 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { break } v.reset(OpAMD64CMOVWNE) - v.AddArg(y) - v.AddArg(x) v0 := b.NewValue0(v.Pos, 
OpAMD64CMPQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(check) - v.AddArg(v0) + v.AddArg3(y, x, v0) return true } return false @@ -35980,15 +34273,13 @@ func rewriteValueAMD64_OpCtz64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) v2.AuxInt = 64 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v4.AddArg(x) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -36037,8 +34328,7 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool { v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36055,8 +34345,7 @@ func rewriteValueAMD64_OpDiv16u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36075,8 +34364,7 @@ func rewriteValueAMD64_OpDiv32(v *Value) bool { v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36093,8 +34381,7 @@ func rewriteValueAMD64_OpDiv32u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36113,8 +34400,7 @@ func rewriteValueAMD64_OpDiv64(v *Value) bool { v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36131,8 +34417,7 @@ func rewriteValueAMD64_OpDiv64u(v *Value) bool { y := v_1 
v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36151,10 +34436,9 @@ func rewriteValueAMD64_OpDiv8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -36173,10 +34457,9 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -36192,8 +34475,7 @@ func rewriteValueAMD64_OpEq16(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36209,8 +34491,7 @@ func rewriteValueAMD64_OpEq32(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36226,8 +34507,7 @@ func rewriteValueAMD64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36243,8 +34523,7 @@ func rewriteValueAMD64_OpEq64(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36260,8 +34539,7 @@ func rewriteValueAMD64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - 
v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36277,8 +34555,7 @@ func rewriteValueAMD64_OpEq8(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36294,8 +34571,7 @@ func rewriteValueAMD64_OpEqB(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36311,8 +34587,7 @@ func rewriteValueAMD64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36328,9 +34603,7 @@ func rewriteValueAMD64_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpAMD64VFMADD231SD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -36357,8 +34630,7 @@ func rewriteValueAMD64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36374,8 +34646,7 @@ func rewriteValueAMD64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36391,8 +34662,7 @@ func rewriteValueAMD64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36408,8 +34678,7 @@ func rewriteValueAMD64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36425,8 +34694,7 @@ func rewriteValueAMD64_OpIsInBounds(v *Value) 
bool { len := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -36440,8 +34708,7 @@ func rewriteValueAMD64_OpIsNonNil(v *Value) bool { p := v_0 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg(p) - v0.AddArg(p) + v0.AddArg2(p, p) v.AddArg(v0) return true } @@ -36457,8 +34724,7 @@ func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -36474,8 +34740,7 @@ func rewriteValueAMD64_OpLeq16(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36491,8 +34756,7 @@ func rewriteValueAMD64_OpLeq16U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36508,8 +34772,7 @@ func rewriteValueAMD64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36525,8 +34788,7 @@ func rewriteValueAMD64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36542,8 +34804,7 @@ func rewriteValueAMD64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36559,8 +34820,7 @@ func rewriteValueAMD64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36576,8 +34836,7 @@ func rewriteValueAMD64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36593,8 +34852,7 @@ func rewriteValueAMD64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36610,8 +34868,7 @@ func rewriteValueAMD64_OpLeq8(v *Value) bool { y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36627,8 +34884,7 @@ func rewriteValueAMD64_OpLeq8U(v *Value) bool { y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36644,8 +34900,7 @@ func rewriteValueAMD64_OpLess16(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36661,8 +34916,7 @@ func rewriteValueAMD64_OpLess16U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36678,8 +34932,7 @@ func rewriteValueAMD64_OpLess32(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36695,8 +34948,7 @@ func rewriteValueAMD64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36712,8 +34964,7 @@ 
func rewriteValueAMD64_OpLess32U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36729,8 +34980,7 @@ func rewriteValueAMD64_OpLess64(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36746,8 +34996,7 @@ func rewriteValueAMD64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -36763,8 +35012,7 @@ func rewriteValueAMD64_OpLess64U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36780,8 +35028,7 @@ func rewriteValueAMD64_OpLess8(v *Value) bool { y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36797,8 +35044,7 @@ func rewriteValueAMD64_OpLess8U(v *Value) bool { y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -36817,8 +35063,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVQload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36832,8 +35077,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVLload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36847,8 +35091,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36862,8 +35105,7 
@@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36877,8 +35119,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVSSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -36892,8 +35133,7 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { break } v.reset(OpAMD64MOVSDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -36927,15 +35167,13 @@ func rewriteValueAMD64_OpLsh16x16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x16 x y) @@ -36948,8 +35186,7 @@ func rewriteValueAMD64_OpLsh16x16(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -36970,15 +35207,13 @@ func rewriteValueAMD64_OpLsh16x32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x32 x y) @@ -36991,8 +35226,7 @@ func rewriteValueAMD64_OpLsh16x32(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37013,15 +35247,13 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x64 x y) @@ -37034,8 +35266,7 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37056,15 +35287,13 @@ func rewriteValueAMD64_OpLsh16x8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh16x8 x y) @@ -37077,8 +35306,7 @@ func rewriteValueAMD64_OpLsh16x8(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37099,15 +35327,13 @@ func rewriteValueAMD64_OpLsh32x16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x16 x y) @@ -37120,8 +35346,7 @@ func rewriteValueAMD64_OpLsh32x16(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37142,15 +35367,13 @@ func rewriteValueAMD64_OpLsh32x32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, 
v1) return true } // match: (Lsh32x32 x y) @@ -37163,8 +35386,7 @@ func rewriteValueAMD64_OpLsh32x32(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37185,15 +35407,13 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x64 x y) @@ -37206,8 +35426,7 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37228,15 +35447,13 @@ func rewriteValueAMD64_OpLsh32x8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh32x8 x y) @@ -37249,8 +35466,7 @@ func rewriteValueAMD64_OpLsh32x8(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37271,15 +35487,13 @@ func rewriteValueAMD64_OpLsh64x16(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x16 x y) @@ -37292,8 +35506,7 @@ func rewriteValueAMD64_OpLsh64x16(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } return false @@ -37314,15 +35527,13 @@ func rewriteValueAMD64_OpLsh64x32(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x32 x y) @@ -37335,8 +35546,7 @@ func rewriteValueAMD64_OpLsh64x32(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37357,15 +35567,13 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x64 x y) @@ -37378,8 +35586,7 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37400,15 +35607,13 @@ func rewriteValueAMD64_OpLsh64x8(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh64x8 x y) @@ -37421,8 +35626,7 @@ func rewriteValueAMD64_OpLsh64x8(v *Value) bool { break } v.reset(OpAMD64SHLQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37443,15 +35647,13 @@ func rewriteValueAMD64_OpLsh8x16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, 
OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x16 x y) @@ -37464,8 +35666,7 @@ func rewriteValueAMD64_OpLsh8x16(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37486,15 +35687,13 @@ func rewriteValueAMD64_OpLsh8x32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x32 x y) @@ -37507,8 +35706,7 @@ func rewriteValueAMD64_OpLsh8x32(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37529,15 +35727,13 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x64 x y) @@ -37550,8 +35746,7 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37572,15 +35767,13 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, 
types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Lsh8x8 x y) @@ -37593,8 +35786,7 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool { break } v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -37613,8 +35805,7 @@ func rewriteValueAMD64_OpMod16(v *Value) bool { v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37631,8 +35822,7 @@ func rewriteValueAMD64_OpMod16u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37651,8 +35841,7 @@ func rewriteValueAMD64_OpMod32(v *Value) bool { v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37669,8 +35858,7 @@ func rewriteValueAMD64_OpMod32u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37689,8 +35877,7 @@ func rewriteValueAMD64_OpMod64(v *Value) bool { v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) v0.AuxInt = a - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37707,8 +35894,7 @@ func rewriteValueAMD64_OpMod64u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -37727,10 +35913,9 @@ func rewriteValueAMD64_OpMod8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v1 := 
b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -37749,10 +35934,9 @@ func rewriteValueAMD64_OpMod8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -37786,12 +35970,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -37804,12 +35985,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -37822,12 +36000,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVLstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -37840,12 +36015,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpAMD64MOVQstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -37862,12 +36034,9 @@ 
func rewriteValueAMD64_OpMove(v *Value) bool { break } v.reset(OpAMD64MOVOstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -37885,20 +36054,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } v.reset(OpAMD64MOVQstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [32] dst src mem) @@ -37915,17 +36078,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = 16 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [48] dst src mem) @@ -37946,17 +36105,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = 16 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [64] dst src mem) @@ -37977,17 +36132,13 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, 
dst.Type) v0.AuxInt = 32 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = 32 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = 32 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [3] dst src mem) @@ -38001,20 +36152,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -38028,20 +36173,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -38055,20 +36194,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, 
typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -38082,20 +36215,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [9] dst src mem) @@ -38109,20 +36236,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [10] dst src mem) @@ -38136,20 +36257,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, 
v0, v1) return true } // match: (Move [12] dst src mem) @@ -38163,20 +36278,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -38192,20 +36301,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } v.reset(OpAMD64MOVQstore) v.AuxInt = s - 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -38224,19 +36327,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -38255,19 +36353,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt 
= s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -38286,28 +36379,20 @@ func rewriteValueAMD64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) v2.AuxInt = 8 - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v3.AuxInt = 8 - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) + v3.AddArg2(src, mem) v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v4.AddArg(dst) v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v5.AddArg(src) - v5.AddArg(mem) - v4.AddArg(v5) - v4.AddArg(mem) - v2.AddArg(v4) - v.AddArg(v2) + v5.AddArg2(src, mem) + v4.AddArg3(dst, v5, mem) + v2.AddArg3(dst, v3, v4) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -38323,9 +36408,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } v.reset(OpAMD64DUFFCOPY) v.AuxInt = 14 * (64 - s/16) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -38340,12 +36423,9 @@ func rewriteValueAMD64_OpMove(v *Value) bool { break } v.reset(OpAMD64REPMOVSQ) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = s / 8 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -38359,10 +36439,9 @@ func rewriteValueAMD64_OpNeg32F(v *Value) bool { for { x := v_0 v.reset(OpAMD64PXOR) - 
v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1))) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -38375,10 +36454,9 @@ func rewriteValueAMD64_OpNeg64F(v *Value) bool { for { x := v_0 v.reset(OpAMD64PXOR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) v0.AuxInt = auxFrom64F(math.Copysign(0, -1)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -38393,8 +36471,7 @@ func rewriteValueAMD64_OpNeq16(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38410,8 +36487,7 @@ func rewriteValueAMD64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38427,8 +36503,7 @@ func rewriteValueAMD64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38444,8 +36519,7 @@ func rewriteValueAMD64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38461,8 +36535,7 @@ func rewriteValueAMD64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38478,8 +36551,7 @@ func rewriteValueAMD64_OpNeq8(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38495,8 +36567,7 @@ func rewriteValueAMD64_OpNeqB(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, 
OpAMD64CMPB, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38512,8 +36583,7 @@ func rewriteValueAMD64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -38556,8 +36626,7 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool { v.reset(OpAMD64ADDQ) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -38578,9 +36647,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } v.reset(OpAMD64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -38596,9 +36663,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } v.reset(OpAMD64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -38614,9 +36679,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } v.reset(OpAMD64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -38679,15 +36742,13 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux16 x y) @@ -38700,8 +36761,7 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38722,15 +36782,13 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { } 
v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux32 x y) @@ -38743,8 +36801,7 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38765,15 +36822,13 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux64 x y) @@ -38786,8 +36841,7 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38808,15 +36862,13 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 16 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh16Ux8 x y) @@ -38829,8 +36881,7 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { break } v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38851,9 +36902,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -38861,8 +36910,8 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x16 x y) @@ -38875,8 +36924,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38897,9 +36945,7 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -38907,8 +36953,8 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x32 x y) @@ -38921,8 +36967,7 @@ func rewriteValueAMD64_OpRsh16x32(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -38943,9 +36988,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -38953,8 +36996,8 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x64 x y) @@ -38967,8 +37010,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return 
false @@ -38989,9 +37031,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool { } v.reset(OpAMD64SARW) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -38999,8 +37039,8 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh16x8 x y) @@ -39013,8 +37053,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value) bool { break } v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39035,15 +37074,13 @@ func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux16 x y) @@ -39056,8 +37093,7 @@ func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39078,15 +37114,13 @@ func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux32 x y) @@ -39099,8 +37133,7 @@ func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39121,15 +37154,13 @@ 
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux64 x y) @@ -39142,8 +37173,7 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39164,15 +37194,13 @@ func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 32 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux8 x y) @@ -39185,8 +37213,7 @@ func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { break } v.reset(OpAMD64SHRL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39207,9 +37234,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -39217,8 +37242,8 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 x y) @@ -39231,8 +37256,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39253,9 +37277,7 @@ func rewriteValueAMD64_OpRsh32x32(v 
*Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -39263,8 +37285,8 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 x y) @@ -39277,8 +37299,7 @@ func rewriteValueAMD64_OpRsh32x32(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39299,9 +37320,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -39309,8 +37328,8 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x y) @@ -39323,8 +37342,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39345,9 +37363,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool { } v.reset(OpAMD64SARL) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -39355,8 +37371,8 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 x y) 
@@ -39369,8 +37385,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value) bool { break } v.reset(OpAMD64SARL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39391,15 +37406,13 @@ func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux16 x y) @@ -39412,8 +37425,7 @@ func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39434,15 +37446,13 @@ func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux32 x y) @@ -39455,8 +37465,7 @@ func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39477,15 +37486,13 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux64 x y) @@ -39498,8 +37505,7 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } 
return false @@ -39520,15 +37526,13 @@ func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { } v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh64Ux8 x y) @@ -39541,8 +37545,7 @@ func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { break } v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39563,9 +37566,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -39573,8 +37574,8 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x16 x y) @@ -39587,8 +37588,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39609,9 +37609,7 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -39619,8 +37617,8 @@ func rewriteValueAMD64_OpRsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x32 x y) @@ -39633,8 +37631,7 @@ func rewriteValueAMD64_OpRsh64x32(v 
*Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39655,9 +37652,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -39665,8 +37660,8 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x y) @@ -39679,8 +37674,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39701,9 +37695,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool { } v.reset(OpAMD64SARQ) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -39711,8 +37703,8 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh64x8 x y) @@ -39725,8 +37717,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value) bool { break } v.reset(OpAMD64SARQ) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39747,15 +37738,13 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) 
return true } // match: (Rsh8Ux16 x y) @@ -39768,8 +37757,7 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39790,15 +37778,13 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux32 x y) @@ -39811,8 +37797,7 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39833,15 +37818,13 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux64 x y) @@ -39854,8 +37837,7 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39876,15 +37858,13 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { } v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) v2.AuxInt = 8 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Rsh8Ux8 x y) @@ -39897,8 +37877,7 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { break } v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } return false @@ -39919,9 +37898,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) @@ -39929,8 +37906,8 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x16 x y) @@ -39943,8 +37920,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -39965,9 +37941,7 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) @@ -39975,8 +37949,8 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x32 x y) @@ -39989,8 +37963,7 @@ func rewriteValueAMD64_OpRsh8x32(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -40011,9 +37984,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) @@ -40021,8 +37992,8 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - 
v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x64 x y) @@ -40035,8 +38006,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -40057,9 +38027,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } v.reset(OpAMD64SARB) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) @@ -40067,8 +38035,8 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh8x8 x y) @@ -40081,8 +38049,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { break } v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -40102,8 +38069,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -40118,8 +38084,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -40135,13 +38100,11 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, 
v1) v.AddArg(v0) return true } @@ -40157,13 +38120,11 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -40177,10 +38138,9 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpAMD64ADDL) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } // match: (Select0 (AddTupleFirst64 val tuple)) @@ -40193,10 +38153,9 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpAMD64ADDQ) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } return false @@ -40216,8 +38175,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v.reset(OpAMD64SETO) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -40233,8 +38191,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v.reset(OpAMD64SETO) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -40253,13 +38210,11 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - 
v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v4.AddArg(c) v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -40279,13 +38234,11 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) v4.AddArg(c) v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -40380,9 +38333,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVSDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40397,9 +38348,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVSSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40414,9 +38363,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40431,9 +38378,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40448,9 +38393,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -40465,9 +38408,7 @@ func 
rewriteValueAMD64_OpStore(v *Value) bool { break } v.reset(OpAMD64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -40512,8 +38453,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -40526,8 +38466,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVWstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) @@ -40540,8 +38479,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVLstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) @@ -40554,8 +38492,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVQstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -40568,12 +38505,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) @@ -40586,12 +38521,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -40604,12 +38537,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 
v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -40622,12 +38553,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { mem := v_1 v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(0, 3) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -40645,12 +38574,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 8 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [16] destptr mem) @@ -40667,12 +38594,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [24] destptr mem) @@ -40689,16 +38614,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 16) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 8) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return 
true } // match: (Zero [32] destptr mem) @@ -40715,20 +38637,16 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 24) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = makeValAndOff(0, 16) - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = makeValAndOff(0, 8) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -40743,12 +38661,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, s-8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -40766,14 +38682,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 16 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [s] destptr mem) @@ -40791,12 +38704,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 16 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [16] 
destptr mem) @@ -40812,11 +38723,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64MOVOstore) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [32] destptr mem) @@ -40835,17 +38744,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = 16 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v2.AddArg(destptr) v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v3.AuxInt = 0 - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(destptr, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Zero [48] destptr mem) @@ -40864,26 +38769,20 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = 32 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v3.AuxInt = 16 v3.AddArg(destptr) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v5.AddArg(destptr) v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v2.AddArg(v5) - v.AddArg(v2) + v5.AddArg3(destptr, v6, mem) + v2.AddArg3(v3, v4, v5) + v.AddArg3(v0, v1, v2) return true } // match: (Zero [64] destptr mem) @@ -40902,35 +38801,27 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = 48 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v1.AuxInt = 0 - 
v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v3.AuxInt = 32 v3.AddArg(destptr) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v6.AuxInt = 16 v6.AddArg(destptr) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v7.AuxInt = 0 - v5.AddArg(v7) v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v8.AddArg(destptr) v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v9.AuxInt = 0 - v8.AddArg(v9) - v8.AddArg(mem) - v5.AddArg(v8) - v2.AddArg(v5) - v.AddArg(v2) + v8.AddArg3(destptr, v9, mem) + v5.AddArg3(v6, v7, v8) + v2.AddArg3(v3, v4, v5) + v.AddArg3(v0, v1, v2) return true } // match: (Zero [s] destptr mem) @@ -40945,11 +38836,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64DUFFZERO) v.AuxInt = s - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [s] destptr mem) @@ -40963,14 +38852,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64REPSTOSQ) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = s / 8 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(destptr, v0, v1, mem) return true } return false @@ -40997,8 +38883,7 @@ func rewriteBlockAMD64(b *Block) bool { y := v_0_1 b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -41023,8 +38908,7 @@ func rewriteBlockAMD64(b *Block) bool { y := v_0_1 b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -41538,8 +39422,7 @@ func rewriteBlockAMD64(b *Block) bool { cond := b.Controls[0] b.Reset(BlockAMD64NE) v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags) - v0.AddArg(cond) - v0.AddArg(cond) + v0.AddArg2(cond, cond) b.AddControl(v0) return true } @@ -41846,8 +39729,7 @@ func rewriteBlockAMD64(b *Block) bool { y := v_0_1 b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -41872,8 +39754,7 @@ func rewriteBlockAMD64(b *Block) bool { y := v_0_1 b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go index 69df3f7a1e..40a7013744 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go @@ -41,8 +41,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -65,10 +64,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -89,8 +86,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -113,10 +109,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool { v0 := b.NewValue0(v.Pos, 
OpAMD64MOVLload, typ.UInt32) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -137,8 +131,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -161,10 +154,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } @@ -185,8 +176,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = offOnly(vo) v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -209,10 +199,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 6849fecc2a..5be3e34dcb 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -918,8 +918,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } break @@ -937,9 +936,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := v_2 v.reset(OpARMADCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } break @@ -957,9 +954,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := 
v_2 v.reset(OpARMADCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } break @@ -977,9 +972,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { flags := v_2 v.reset(OpARMADCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } break @@ -996,10 +989,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMADCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } break @@ -1016,10 +1006,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMADCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } break @@ -1036,10 +1023,7 @@ func rewriteValueARM_OpARMADC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMADCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } break @@ -1061,8 +1045,7 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool { flags := v_1 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c + d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (ADCconst [c] (SUBconst [d] x) flags) @@ -1077,8 +1060,7 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool { flags := v_1 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c - d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1103,8 +1085,7 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (ADCshiftLL x (MOVWconst [c]) [d] flags) @@ -1119,8 +1100,7 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(uint32(c) << uint64(d))) - v.AddArg(x) - v.AddArg(flags) + 
v.AddArg2(x, flags) return true } return false @@ -1144,10 +1124,8 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool { v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (ADCshiftLLreg x y (MOVWconst [c]) flags) @@ -1162,9 +1140,7 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool { flags := v_3 v.reset(OpARMADCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -1189,8 +1165,7 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRA x (MOVWconst [c]) [d] flags) @@ -1205,8 +1180,7 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1230,10 +1204,8 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool { v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRAreg x y (MOVWconst [c]) flags) @@ -1248,9 +1220,7 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool { flags := v_3 v.reset(OpARMADCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -1275,8 +1245,7 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRL x (MOVWconst [c]) [d] flags) @@ -1291,8 +1260,7 @@ func 
rewriteValueARM_OpARMADCshiftRL(v *Value) bool { flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -1316,10 +1284,8 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool { v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (ADCshiftRLreg x y (MOVWconst [c]) flags) @@ -1334,9 +1300,7 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool { flags := v_3 v.reset(OpARMADCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -1373,8 +1337,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1391,8 +1354,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1409,8 +1371,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1426,9 +1387,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1444,9 +1403,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1462,9 +1419,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1479,8 
+1434,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1503,8 +1457,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c + d v0 := b.NewValue0(v.Pos, OpARMADD, t) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1521,9 +1474,7 @@ func rewriteValueARM_OpARMADD(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARMMULA) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) + v.AddArg3(x, y, a) return true } break @@ -1548,9 +1499,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { continue } v.reset(OpARMMULAD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1570,9 +1519,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { continue } v.reset(OpARMMULSD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1597,9 +1544,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { continue } v.reset(OpARMMULAF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1619,9 +1564,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { continue } v.reset(OpARMMULSF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1659,8 +1602,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1677,8 +1619,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1695,8 +1636,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMADDSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1712,9 +1652,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] 
v.reset(OpARMADDSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1730,9 +1668,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1748,9 +1684,7 @@ func rewriteValueARM_OpARMADDS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMADDSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -1811,8 +1745,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool { v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1827,8 +1760,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1887,8 +1819,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool { v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1903,8 +1834,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1963,8 +1893,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool { v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1979,8 +1908,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2203,8 +2131,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool { v.reset(OpARMADDconst) v.AuxInt = c v0 := 
b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2219,8 +2146,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2279,8 +2205,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool { v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2295,8 +2220,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2371,8 +2295,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2387,8 +2310,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMADDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2424,8 +2346,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMANDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2442,8 +2363,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMANDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2460,8 +2380,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMANDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2477,9 +2396,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMANDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -2495,9 
+2412,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMANDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -2513,9 +2428,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMANDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -2542,8 +2455,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMBIC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2560,8 +2472,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2578,8 +2489,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2596,8 +2506,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2759,8 +2668,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool { v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2775,8 +2683,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMANDshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -2854,8 +2761,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool { v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2870,8 +2776,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMANDshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } return false @@ -2949,8 +2854,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool { v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2965,8 +2869,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMANDshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3030,8 +2933,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (BIC x (SRLconst [c] y)) @@ -3045,8 +2947,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (BIC x (SRAconst [c] y)) @@ -3060,8 +2961,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { y := v_1.Args[0] v.reset(OpARMBICshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (BIC x (SLL y z)) @@ -3074,9 +2974,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMBICshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (BIC x (SRL y z)) @@ -3089,9 +2987,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMBICshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (BIC x (SRA y z)) @@ -3104,9 +3000,7 @@ func rewriteValueARM_OpARMBIC(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMBICshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (BIC x x) @@ -3255,8 +3149,7 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMBICshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } return false @@ -3312,8 +3205,7 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMBICshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3369,8 +3261,7 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMBICshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3406,8 +3297,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMNshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3424,8 +3314,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMNshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3442,8 +3331,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMNshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3459,9 +3347,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMNshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -3477,9 +3363,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMNshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -3495,9 +3379,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMNshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -3512,8 +3394,7 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMCMP) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -3653,8 +3534,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool { v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - 
v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3669,8 +3549,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMNshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3729,8 +3608,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool { v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3745,8 +3623,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMNshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3805,8 +3682,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool { v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3821,8 +3697,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMNshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -3898,8 +3773,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { flags := v_1.Args[0] v.reset(OpARMCMOVWLSconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -3975,8 +3849,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { flags := v_1.Args[0] v.reset(OpARMCMOVWHSconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -4024,8 +3897,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { } v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4040,8 +3912,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMPshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) 
return true } // match: (CMP (SLLconst [c] y) x) @@ -4056,8 +3927,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4072,8 +3942,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMPshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMP (SRLconst [c] y) x) @@ -4088,8 +3957,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4104,8 +3972,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { y := v_1.Args[0] v.reset(OpARMCMPshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMP (SRAconst [c] y) x) @@ -4120,8 +3987,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4135,9 +4001,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMPshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (CMP (SLL y z) x) @@ -4151,9 +4015,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) v.AddArg(v0) return true } @@ -4167,9 +4029,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMPshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (CMP (SRL y z) x) @@ -4183,9 +4043,7 @@ func 
rewriteValueARM_OpARMCMP(v *Value) bool { x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) v.AddArg(v0) return true } @@ -4199,9 +4057,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMCMPshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (CMP (SRA y z) x) @@ -4215,9 +4071,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) v.AddArg(v0) return true } @@ -4230,8 +4084,7 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { } y := v_1.Args[0] v.reset(OpARMCMN) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4456,8 +4309,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -4473,8 +4325,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMPshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4536,8 +4387,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -4553,8 +4403,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMPshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4616,8 +4465,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool { v0 := b.NewValue0(v.Pos, 
OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -4633,8 +4481,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMCMPshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -5241,8 +5088,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -5259,8 +5105,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { v.reset(OpARMMOVBUload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -5282,8 +5127,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -5326,9 +5170,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { break } v.reset(OpARMMOVBUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBUload [off] {sym} (SB) _) @@ -5383,8 +5225,7 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) @@ -5398,8 +5239,7 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5472,8 +5312,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { v.reset(OpARMMOVBload) 
v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -5490,8 +5329,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { v.reset(OpARMMOVBload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -5513,8 +5351,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { v.reset(OpARMMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -5557,9 +5394,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { break } v.reset(OpARMMOVBloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -5601,8 +5436,7 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx (MOVWconst [c]) ptr mem) @@ -5616,8 +5450,7 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVBload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5696,9 +5529,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -5716,9 +5547,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -5741,9 +5570,7 @@ func 
rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -5760,9 +5587,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -5779,9 +5604,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -5798,9 +5621,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -5817,9 +5638,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem) @@ -5841,10 +5660,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { break } v.reset(OpARMMOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -5866,9 +5682,7 @@ func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) @@ -5883,9 +5697,7 @@ func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - 
v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -5907,8 +5719,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { v.reset(OpARMMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -5925,8 +5736,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { v.reset(OpARMMOVDload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -5948,8 +5758,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { v.reset(OpARMMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) @@ -5996,9 +5805,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { v.reset(OpARMMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -6016,9 +5823,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { v.reset(OpARMMOVDstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -6041,9 +5846,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { v.reset(OpARMMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -6065,8 +5868,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6083,8 
+5885,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { v.reset(OpARMMOVFload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6106,8 +5907,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) @@ -6154,9 +5954,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { v.reset(OpARMMOVFstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -6174,9 +5972,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { v.reset(OpARMMOVFstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -6199,9 +5995,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { v.reset(OpARMMOVFstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -6225,8 +6019,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { v.reset(OpARMMOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6243,8 +6036,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { v.reset(OpARMMOVHUload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6266,8 +6058,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { 
v.reset(OpARMMOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -6310,9 +6101,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { break } v.reset(OpARMMOVHUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off] {sym} (SB) _) @@ -6367,8 +6156,7 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) @@ -6382,8 +6170,7 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6479,8 +6266,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6497,8 +6283,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { v.reset(OpARMMOVHload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6520,8 +6305,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -6564,9 +6348,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { break } v.reset(OpARMMOVHloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6608,8 +6390,7 @@ func rewriteValueARM_OpARMMOVHloadidx(v 
*Value) bool { mem := v_2 v.reset(OpARMMOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVWconst [c]) ptr mem) @@ -6623,8 +6404,7 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6749,9 +6529,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -6769,9 +6547,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -6794,9 +6570,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -6813,9 +6587,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -6832,9 +6604,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { v.reset(OpARMMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem) @@ -6856,10 +6626,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { break } v.reset(OpARMMOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) 
return true } return false @@ -6881,9 +6648,7 @@ func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) @@ -6898,9 +6663,7 @@ func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -6924,8 +6687,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { v.reset(OpARMMOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) @@ -6942,8 +6704,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { v.reset(OpARMMOVWload) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -6965,8 +6726,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { v.reset(OpARMMOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -7010,9 +6770,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { break } v.reset(OpARMMOVWloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) @@ -7035,9 +6793,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) @@ -7060,9 +6816,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c - v.AddArg(ptr) 
- v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) @@ -7085,9 +6839,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (SB) _) @@ -7143,8 +6895,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx (MOVWconst [c]) ptr mem) @@ -7158,8 +6909,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx ptr (SLLconst idx [c]) mem) @@ -7174,9 +6924,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SLLconst idx [c]) ptr mem) @@ -7191,9 +6939,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx ptr (SRLconst idx [c]) mem) @@ -7208,9 +6954,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SRLconst idx [c]) ptr mem) @@ -7225,9 +6969,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx ptr (SRAconst idx [c]) mem) @@ -7242,9 +6984,7 @@ func 
rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SRAconst idx [c]) ptr mem) @@ -7259,9 +6999,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7307,8 +7045,7 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(uint32(c) << uint64(d)) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7354,8 +7091,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7401,8 +7137,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool { mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(uint32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -7453,9 +7188,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { v.reset(OpARMMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) @@ -7473,9 +7206,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { v.reset(OpARMMOVWstore) v.AuxInt = off1 - off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -7498,9 +7229,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { v.reset(OpARMMOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) 
return true } // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem) @@ -7522,10 +7251,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { break } v.reset(OpARMMOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) @@ -7549,10 +7275,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) @@ -7576,10 +7299,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) @@ -7603,10 +7323,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7628,9 +7345,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem) @@ -7645,9 +7360,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem) @@ -7663,10 +7376,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx 
(SLLconst idx [c]) ptr val mem) @@ -7682,10 +7392,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem) @@ -7701,10 +7408,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem) @@ -7720,10 +7424,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem) @@ -7739,10 +7440,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem) @@ -7758,10 +7456,7 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -7784,9 +7479,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(uint32(c) << uint64(d)) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7809,9 +7502,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } 
return false @@ -7834,9 +7525,7 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool { mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(uint32(c) >> uint64(d)) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7928,8 +7617,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { } v.reset(OpARMADDshiftLL) v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -7949,8 +7637,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { } v.reset(OpARMRSBshiftLL) v.AuxInt = log2(c + 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -7972,8 +7659,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -7996,8 +7682,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -8020,8 +7705,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 7) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -8044,8 +7728,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -8090,8 +7773,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } v.reset(OpARMSUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULA _ (MOVWconst [0]) a) @@ -8115,8 +7797,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { } a := v_2 v.reset(OpARMADD) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8136,8 +7817,7 @@ func 
rewriteValueARM_OpARMMULA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8156,10 +7836,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8178,10 +7856,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8202,11 +7878,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8227,11 +7901,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8252,11 +7924,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA x (MOVWconst [c]) a) @@ -8277,11 +7947,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + 
v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8298,8 +7966,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } v.reset(OpARMSUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULA (MOVWconst [0]) _ a) @@ -8323,8 +7990,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { x := v_1 a := v_2 v.reset(OpARMADD) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8344,8 +8010,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8364,10 +8029,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8386,10 +8049,8 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8410,11 +8071,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8435,11 +8094,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return 
true } // match: (MULA (MOVWconst [c]) x a) @@ -8460,11 +8117,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) x a) @@ -8485,11 +8140,9 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a) @@ -8528,8 +8181,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool { continue } v.reset(OpARMNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -8553,8 +8205,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool { continue } v.reset(OpARMNMULF) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -8580,8 +8231,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } v.reset(OpARMADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULS _ (MOVWconst [0]) a) @@ -8605,8 +8255,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { } a := v_2 v.reset(OpARMRSB) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8626,8 +8275,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8646,10 +8294,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8668,10 
+8314,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8692,11 +8336,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8717,11 +8359,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8742,11 +8382,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS x (MOVWconst [c]) a) @@ -8767,11 +8405,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8788,8 +8424,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } v.reset(OpARMADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MULS (MOVWconst [0]) _ a) @@ -8813,8 +8448,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { x := v_1 a := v_2 v.reset(OpARMRSB) - v.AddArg(x) - v.AddArg(a) + v.AddArg2(x, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8834,8 +8468,7 
@@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = log2(c) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8854,10 +8487,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8876,10 +8507,8 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v.reset(OpARMRSB) v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(a) + v0.AddArg2(x, x) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8900,11 +8529,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 3) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 1 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8925,11 +8552,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8950,11 +8575,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 7) v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) x a) @@ -8975,11 +8598,9 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - 
v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(a) + v.AddArg2(v0, a) return true } // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a) @@ -9062,8 +8683,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARMMVNshiftLLreg) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MVN (SRL x y)) @@ -9075,8 +8695,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARMMVNshiftRLreg) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MVN (SRA x y)) @@ -9088,8 +8707,7 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARMMVNshiftRAreg) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9211,8 +8829,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool { break } v.reset(OpARMNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9232,8 +8849,7 @@ func rewriteValueARM_OpARMNEGF(v *Value) bool { break } v.reset(OpARMNMULF) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9251,8 +8867,7 @@ func rewriteValueARM_OpARMNMULD(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARMMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9272,8 +8887,7 @@ func rewriteValueARM_OpARMNMULF(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARMMULF) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9376,8 +8990,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9394,8 +9007,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9412,8 +9024,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMORshiftRA) v.AuxInt = 
c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -9429,9 +9040,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -9447,9 +9056,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -9465,9 +9072,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -9663,8 +9268,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool { v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -9679,8 +9283,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9758,8 +9361,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool { v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -9774,8 +9376,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -9869,8 +9470,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool { v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -9885,8 +9485,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } 
return false @@ -9931,8 +9530,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMRSBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB (SLLconst [c] y) x) @@ -9946,8 +9544,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { x := v_1 v.reset(OpARMSUBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB x (SRLconst [c] y)) @@ -9961,8 +9558,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMRSBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB (SRLconst [c] y) x) @@ -9976,8 +9572,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { x := v_1 v.reset(OpARMSUBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB x (SRAconst [c] y)) @@ -9991,8 +9586,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMRSBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB (SRAconst [c] y) x) @@ -10006,8 +9600,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { x := v_1 v.reset(OpARMSUBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (RSB x (SLL y z)) @@ -10020,9 +9613,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMRSBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB (SLL y z) x) @@ -10035,9 +9626,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMSUBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB x (SRL y z)) @@ -10050,9 +9639,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMRSBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB (SRL y z) x) 
@@ -10065,9 +9652,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMSUBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB x (SRA y z)) @@ -10080,9 +9665,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMRSBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB (SRA y z) x) @@ -10095,9 +9678,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMSUBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (RSB x x) @@ -10125,9 +9706,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { break } v.reset(OpARMMULS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) + v.AddArg3(x, y, a) return true } return false @@ -10186,8 +9765,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool { v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10202,8 +9780,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10262,8 +9839,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool { v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10278,8 +9854,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10338,8 +9913,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool { v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return 
true } @@ -10354,8 +9928,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10489,8 +10062,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool { v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10505,8 +10077,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10582,8 +10153,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool { v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10598,8 +10168,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10675,8 +10244,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool { v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -10691,8 +10259,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMRSBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10712,8 +10279,7 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool { flags := v_1 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c - d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (RSCconst [c] (SUBconst [d] x) flags) @@ -10728,8 +10294,7 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool { flags := v_1 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c + d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, 
flags) return true } return false @@ -10754,8 +10319,7 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (RSCshiftLL x (MOVWconst [c]) [d] flags) @@ -10770,8 +10334,7 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(uint32(c) << uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10795,10 +10358,8 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool { v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (RSCshiftLLreg x y (MOVWconst [c]) flags) @@ -10813,9 +10374,7 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool { flags := v_3 v.reset(OpARMRSCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -10840,8 +10399,7 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRA x (MOVWconst [c]) [d] flags) @@ -10856,8 +10414,7 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10881,10 +10438,8 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool { v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRAreg x y (MOVWconst [c]) flags) @@ -10899,9 +10454,7 @@ func 
rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool { flags := v_3 v.reset(OpARMRSCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -10926,8 +10479,7 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRL x (MOVWconst [c]) [d] flags) @@ -10942,8 +10494,7 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -10967,10 +10518,8 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool { v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (RSCshiftRLreg x y (MOVWconst [c]) flags) @@ -10985,9 +10534,7 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool { flags := v_3 v.reset(OpARMRSCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11007,8 +10554,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (SBC x (MOVWconst [c]) flags) @@ -11022,8 +10568,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (SBC x (SLLconst [c] y) flags) @@ -11038,9 +10583,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC (SLLconst [c] y) x flags) @@ -11055,9 
+10598,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC x (SRLconst [c] y) flags) @@ -11072,9 +10613,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC (SRLconst [c] y) x flags) @@ -11089,9 +10628,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC x (SRAconst [c] y) flags) @@ -11106,9 +10643,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMSBCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC (SRAconst [c] y) x flags) @@ -11123,9 +10658,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { flags := v_2 v.reset(OpARMRSCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } // match: (SBC x (SLL y z) flags) @@ -11139,10 +10672,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMSBCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC (SLL y z) x flags) @@ -11156,10 +10686,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { x := v_1 flags := v_2 v.reset(OpARMRSCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC x (SRL y z) flags) @@ -11173,10 +10700,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMSBCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC (SRL y 
z) x flags) @@ -11190,10 +10714,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { x := v_1 flags := v_2 v.reset(OpARMRSCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC x (SRA y z) flags) @@ -11207,10 +10728,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { y := v_1.Args[0] flags := v_2 v.reset(OpARMSBCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } // match: (SBC (SRA y z) x flags) @@ -11224,10 +10742,7 @@ func rewriteValueARM_OpARMSBC(v *Value) bool { x := v_1 flags := v_2 v.reset(OpARMRSCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) + v.AddArg4(x, y, z, flags) return true } return false @@ -11247,8 +10762,7 @@ func rewriteValueARM_OpARMSBCconst(v *Value) bool { flags := v_1 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c - d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } // match: (SBCconst [c] (SUBconst [d] x) flags) @@ -11263,8 +10777,7 @@ func rewriteValueARM_OpARMSBCconst(v *Value) bool { flags := v_1 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c + d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11289,8 +10802,7 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (SBCshiftLL x (MOVWconst [c]) [d] flags) @@ -11305,8 +10817,7 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(uint32(c) << uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11330,10 +10841,8 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool { v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + 
v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (SBCshiftLLreg x y (MOVWconst [c]) flags) @@ -11348,9 +10857,7 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool { flags := v_3 v.reset(OpARMSBCshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11375,8 +10882,7 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRA x (MOVWconst [c]) [d] flags) @@ -11391,8 +10897,7 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c) >> uint64(d)) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11416,10 +10921,8 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool { v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRAreg x y (MOVWconst [c]) flags) @@ -11434,9 +10937,7 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool { flags := v_3 v.reset(OpARMSBCshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11461,8 +10962,7 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) v0.AuxInt = d v0.AddArg(x) - v.AddArg(v0) - v.AddArg(flags) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRL x (MOVWconst [c]) [d] flags) @@ -11477,8 +10977,7 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool { flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) - v.AddArg(x) - v.AddArg(flags) + v.AddArg2(x, flags) return true } return false @@ -11502,10 +11001,8 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) 
bool { v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(flags) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) return true } // match: (SBCshiftRLreg x y (MOVWconst [c]) flags) @@ -11520,9 +11017,7 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool { flags := v_3 v.reset(OpARMSBCshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) + v.AddArg3(x, y, flags) return true } return false @@ -11604,8 +11099,7 @@ func rewriteValueARM_OpARMSRAcond(v *Value) bool { break } v.reset(OpARMSRA) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAcond x _ (FlagLT_UGT)) @@ -11629,8 +11123,7 @@ func rewriteValueARM_OpARMSRAcond(v *Value) bool { break } v.reset(OpARMSRA) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAcond x _ (FlagGT_UGT)) @@ -11773,8 +11266,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB (SLLconst [c] y) x) @@ -11788,8 +11280,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { x := v_1 v.reset(OpARMRSBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB x (SRLconst [c] y)) @@ -11803,8 +11294,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB (SRLconst [c] y) x) @@ -11818,8 +11308,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { x := v_1 v.reset(OpARMRSBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB x (SRAconst [c] y)) @@ -11833,8 +11322,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB (SRAconst [c] y) x) @@ -11848,8 +11336,7 @@ 
func rewriteValueARM_OpARMSUB(v *Value) bool { x := v_1 v.reset(OpARMRSBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUB x (SLL y z)) @@ -11862,9 +11349,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB (SLL y z) x) @@ -11877,9 +11362,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB x (SRL y z)) @@ -11892,9 +11375,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB (SRL y z) x) @@ -11907,9 +11388,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB x (SRA y z)) @@ -11922,9 +11401,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB (SRA y z) x) @@ -11937,9 +11414,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUB x x) @@ -11967,9 +11442,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { break } v.reset(OpARMMULS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) + v.AddArg3(x, y, a) return true } return false @@ -11991,9 +11464,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { break } v.reset(OpARMMULSD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUBD a (NMULD x y)) @@ -12010,9 +11481,7 @@ 
func rewriteValueARM_OpARMSUBD(v *Value) bool { break } v.reset(OpARMMULAD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -12034,9 +11503,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { break } v.reset(OpARMMULSF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUBF a (NMULF x y)) @@ -12053,9 +11520,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { break } v.reset(OpARMMULAF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -12087,8 +11552,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS (SLLconst [c] y) x) @@ -12102,8 +11566,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { x := v_1 v.reset(OpARMRSBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS x (SRLconst [c] y)) @@ -12117,8 +11580,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS (SRLconst [c] y) x) @@ -12132,8 +11594,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { x := v_1 v.reset(OpARMRSBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS x (SRAconst [c] y)) @@ -12147,8 +11608,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_1.Args[0] v.reset(OpARMSUBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS (SRAconst [c] y) x) @@ -12162,8 +11622,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { x := v_1 v.reset(OpARMRSBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBS x (SLL y z)) @@ -12176,9 +11635,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] 
v.reset(OpARMSUBSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS (SLL y z) x) @@ -12191,9 +11648,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS x (SRL y z)) @@ -12206,9 +11661,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS (SRL y z) x) @@ -12221,9 +11674,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS x (SRA y z)) @@ -12236,9 +11687,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMSUBSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } // match: (SUBS (SRA y z) x) @@ -12251,9 +11700,7 @@ func rewriteValueARM_OpARMSUBS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpARMRSBSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -12312,8 +11759,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool { v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12328,8 +11774,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBSshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12388,8 +11833,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool { v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12404,8 
+11848,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBSshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12464,8 +11907,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool { v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12480,8 +11922,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBSshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12671,8 +12112,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12687,8 +12127,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12764,8 +12203,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12780,8 +12218,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12857,8 +12294,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool { v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -12873,8 +12309,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMSUBshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -12910,8 +12345,7 
@@ func rewriteValueARM_OpARMTEQ(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTEQshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12928,8 +12362,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTEQshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12946,8 +12379,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTEQshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12963,9 +12395,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTEQshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -12981,9 +12411,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTEQshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -12999,9 +12427,7 @@ func rewriteValueARM_OpARMTEQ(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTEQshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13111,8 +12537,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool { v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13127,8 +12552,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTEQshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13187,8 +12611,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool { v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13203,8 +12626,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool { c := v_2.AuxInt 
v.reset(OpARMTEQshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13263,8 +12685,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool { v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13279,8 +12700,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTEQshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13316,8 +12736,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTSTshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13334,8 +12753,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTSTshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13352,8 +12770,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { y := v_1.Args[0] v.reset(OpARMTSTshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13369,9 +12786,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTSTshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13387,9 +12802,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTSTshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13405,9 +12818,7 @@ func rewriteValueARM_OpARMTST(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMTSTshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13517,8 +12928,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool { v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
v.AddArg(v0) return true } @@ -13533,8 +12943,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTSTshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13593,8 +13002,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool { v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13609,8 +13017,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTSTshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13669,8 +13076,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool { v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -13685,8 +13091,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMTSTshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -13722,8 +13127,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13740,8 +13144,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13758,8 +13161,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13776,8 +13178,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { y := v_1.Args[0] v.reset(OpARMXORshiftRR) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -13793,9 +13194,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] 
v.reset(OpARMXORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13811,9 +13210,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMXORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -13829,9 +13226,7 @@ func rewriteValueARM_OpARMXOR(v *Value) bool { z := v_1.Args[1] y := v_1.Args[0] v.reset(OpARMXORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -14012,8 +13407,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14028,8 +13422,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMXORshiftLL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14105,8 +13498,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14121,8 +13513,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMXORshiftRA) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14214,8 +13605,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14230,8 +13620,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool { c := v_2.AuxInt v.reset(OpARMXORshiftRL) v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -14287,11 +13676,9 @@ func rewriteValueARM_OpAvg32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) v0.AuxInt 
= 1 v1 := b.NewValue0(v.Pos, OpARMSUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -14330,18 +13717,16 @@ func rewriteValueARM_OpBswap32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARMBICconst, t) v1.AuxInt = 0xff0000 v2 := b.NewValue0(v.Pos, OpARMXOR, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMSRRconst, t) v3.AuxInt = 16 v3.AddArg(x) - v2.AddArg(v3) + v2.AddArg2(x, v3) v1.AddArg(v2) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpARMSRRconst, t) v4.AuxInt = 8 v4.AddArg(x) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } // match: (Bswap32 x) @@ -14389,14 +13774,13 @@ func rewriteValueARM_OpCtz16(v *Value) bool { v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v3.AuxInt = 0x10000 v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) v4.AuxInt = 0 v5 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v5.AuxInt = 0x10000 v5.AddArg(x) v4.AddArg(v5) - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -14441,11 +13825,10 @@ func rewriteValueARM_OpCtz32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARMSUBconst, t) v1.AuxInt = 1 v2 := b.NewValue0(v.Pos, OpARMAND, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMRSBconst, t) v3.AuxInt = 0 v3.AddArg(x) - v2.AddArg(v3) + v2.AddArg2(x, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -14491,14 +13874,13 @@ func rewriteValueARM_OpCtz8(v *Value) bool { v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v3.AuxInt = 0x100 v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) v4.AuxInt = 0 v5 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) v5.AuxInt = 0x100 v5.AddArg(x) v4.AddArg(v5) - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -14538,10 +13920,9 @@ func rewriteValueARM_OpDiv16(v *Value) bool { v.reset(OpDiv32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, 
OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14558,10 +13939,9 @@ func rewriteValueARM_OpDiv16u(v *Value) bool { v.reset(OpDiv32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14581,41 +13961,32 @@ func rewriteValueARM_OpDiv32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v5.AddArg(x) - v4.AddArg(v5) - v3.AddArg(v4) + v4.AddArg2(x, v5) v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v6.AddArg(x) - v3.AddArg(v6) - v2.AddArg(v3) + v3.AddArg2(v4, v6) v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v8.AddArg(y) v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v9.AddArg(y) - v8.AddArg(v9) - v7.AddArg(v8) + v8.AddArg2(y, v9) v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v10.AddArg(y) - v7.AddArg(v10) - v2.AddArg(v7) + v7.AddArg2(v8, v10) + v2.AddArg2(v3, v7) v1.AddArg(v2) - v0.AddArg(v1) v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v12 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v12.AddArg(x) - v12.AddArg(y) + v12.AddArg2(x, y) v11.AddArg(v12) - v0.AddArg(v11) - v.AddArg(v0) + v0.AddArg2(v1, v11) v13 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v14 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v14.AddArg(x) - v14.AddArg(y) + v14.AddArg2(x, y) v13.AddArg(v14) - v.AddArg(v13) + v.AddArg2(v0, v13) return true } } @@ -14632,8 +14003,7 @@ func rewriteValueARM_OpDiv32u(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14651,10 +14021,9 @@ func 
rewriteValueARM_OpDiv8(v *Value) bool { v.reset(OpDiv32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14671,10 +14040,9 @@ func rewriteValueARM_OpDiv8u(v *Value) bool { v.reset(OpDiv32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -14692,10 +14060,9 @@ func rewriteValueARM_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14711,8 +14078,7 @@ func rewriteValueARM_OpEq32(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14728,8 +14094,7 @@ func rewriteValueARM_OpEq32F(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14745,8 +14110,7 @@ func rewriteValueARM_OpEq64F(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14765,10 +14129,9 @@ func rewriteValueARM_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14786,8 +14149,7 @@ func rewriteValueARM_OpEqB(v *Value) bool { v.reset(OpARMXORconst) v.AuxInt = 1 v0 
:= b.NewValue0(v.Pos, OpARMXOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14803,8 +14165,7 @@ func rewriteValueARM_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14820,9 +14181,7 @@ func rewriteValueARM_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpARMFMULAD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -14837,8 +14196,7 @@ func rewriteValueARM_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14854,8 +14212,7 @@ func rewriteValueARM_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14871,8 +14228,7 @@ func rewriteValueARM_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14888,8 +14244,7 @@ func rewriteValueARM_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14905,8 +14260,7 @@ func rewriteValueARM_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -14937,8 +14291,7 @@ func rewriteValueARM_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -14957,10 
+14310,9 @@ func rewriteValueARM_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14979,10 +14331,9 @@ func rewriteValueARM_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -14998,8 +14349,7 @@ func rewriteValueARM_OpLeq32(v *Value) bool { y := v_1 v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15015,8 +14365,7 @@ func rewriteValueARM_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15032,8 +14381,7 @@ func rewriteValueARM_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15049,8 +14397,7 @@ func rewriteValueARM_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15069,10 +14416,9 @@ func rewriteValueARM_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15091,10 +14437,9 @@ func rewriteValueARM_OpLeq8U(v *Value) bool { 
v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15113,10 +14458,9 @@ func rewriteValueARM_OpLess16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15135,10 +14479,9 @@ func rewriteValueARM_OpLess16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15154,8 +14497,7 @@ func rewriteValueARM_OpLess32(v *Value) bool { y := v_1 v.reset(OpARMLessThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15171,8 +14513,7 @@ func rewriteValueARM_OpLess32F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15188,8 +14529,7 @@ func rewriteValueARM_OpLess32U(v *Value) bool { y := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15205,8 +14545,7 @@ func rewriteValueARM_OpLess64F(v *Value) bool { y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15225,10 +14564,9 @@ func rewriteValueARM_OpLess8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) 
v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15247,10 +14585,9 @@ func rewriteValueARM_OpLess8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -15269,8 +14606,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15284,8 +14620,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15299,8 +14634,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15314,8 +14648,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15329,8 +14662,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15344,8 +14676,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15359,8 +14690,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -15374,8 +14704,7 @@ func rewriteValueARM_OpLoad(v *Value) bool { break } v.reset(OpARMMOVDload) 
- v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -15406,17 +14735,15 @@ func rewriteValueARM_OpLsh16x16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -15432,13 +14759,11 @@ func rewriteValueARM_OpLsh16x32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15490,10 +14815,9 @@ func rewriteValueARM_OpLsh16x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -15510,17 +14834,15 @@ func rewriteValueARM_OpLsh32x16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -15536,13 +14858,11 @@ func rewriteValueARM_OpLsh32x32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + 
v.AddArg2(v0, v1) return true } } @@ -15594,10 +14914,9 @@ func rewriteValueARM_OpLsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -15614,17 +14933,15 @@ func rewriteValueARM_OpLsh8x16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -15640,13 +14957,11 @@ func rewriteValueARM_OpLsh8x32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15698,10 +15013,9 @@ func rewriteValueARM_OpLsh8x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSLL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -15718,10 +15032,9 @@ func rewriteValueARM_OpMod16(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15738,10 +15051,9 @@ func rewriteValueARM_OpMod16u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15761,35 +15073,28 @@ func rewriteValueARM_OpMod32(v 
*Value) bool { v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v5.AddArg(x) - v4.AddArg(v5) - v3.AddArg(v4) + v4.AddArg2(x, v5) v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v6.AddArg(x) - v3.AddArg(v6) - v2.AddArg(v3) + v3.AddArg2(v4, v6) v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v8.AddArg(y) v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v9.AddArg(y) - v8.AddArg(v9) - v7.AddArg(v8) + v8.AddArg2(y, v9) v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v10.AddArg(y) - v7.AddArg(v10) - v2.AddArg(v7) + v7.AddArg2(v8, v10) + v2.AddArg2(v3, v7) v1.AddArg(v2) - v0.AddArg(v1) v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v11.AddArg(x) - v0.AddArg(v11) - v.AddArg(v0) + v0.AddArg2(v1, v11) v12 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v12.AddArg(x) - v.AddArg(v12) + v.AddArg2(v0, v12) return true } } @@ -15806,8 +15111,7 @@ func rewriteValueARM_OpMod32u(v *Value) bool { v.reset(OpSelect1) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15825,10 +15129,9 @@ func rewriteValueARM_OpMod8(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15845,10 +15148,9 @@ func rewriteValueARM_OpMod8u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -15881,12 +15183,9 @@ func rewriteValueARM_OpMove(v *Value) bool { src := v_1 mem := v_2 
v.reset(OpARMMOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] {t} dst src mem) @@ -15904,12 +15203,9 @@ func rewriteValueARM_OpMove(v *Value) bool { break } v.reset(OpARMMOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -15923,20 +15219,14 @@ func rewriteValueARM_OpMove(v *Value) bool { mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 1 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] {t} dst src mem) @@ -15954,12 +15244,9 @@ func rewriteValueARM_OpMove(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] {t} dst src mem) @@ -15978,20 +15265,14 @@ func rewriteValueARM_OpMove(v *Value) bool { } v.reset(OpARMMOVHstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - 
v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] dst src mem) @@ -16005,38 +15286,26 @@ func rewriteValueARM_OpMove(v *Value) bool { mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -16050,29 +15319,20 @@ func rewriteValueARM_OpMove(v *Value) bool { mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + 
v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] {t} dst src mem) @@ -16089,9 +15349,7 @@ func rewriteValueARM_OpMove(v *Value) bool { } v.reset(OpARMDUFFCOPY) v.AuxInt = 8 * (128 - s/4) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] {t} dst src mem) @@ -16108,13 +15366,10 @@ func rewriteValueARM_OpMove(v *Value) bool { } v.reset(OpARMLoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -16169,10 +15424,9 @@ func rewriteValueARM_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -16188,8 +15442,7 @@ func rewriteValueARM_OpNeq32(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16205,8 +15458,7 @@ func rewriteValueARM_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16222,8 +15474,7 @@ func rewriteValueARM_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16242,10 +15493,9 @@ func rewriteValueARM_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) 
v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -16261,8 +15511,7 @@ func rewriteValueARM_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -16322,9 +15571,7 @@ func rewriteValueARM_OpPanicBounds(v *Value) bool { } v.reset(OpARMLoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -16340,9 +15587,7 @@ func rewriteValueARM_OpPanicBounds(v *Value) bool { } v.reset(OpARMLoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -16358,9 +15603,7 @@ func rewriteValueARM_OpPanicBounds(v *Value) bool { } v.reset(OpARMLoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -16384,10 +15627,7 @@ func rewriteValueARM_OpPanicExtend(v *Value) bool { } v.reset(OpARMLoweredPanicExtendA) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -16404,10 +15644,7 @@ func rewriteValueARM_OpPanicExtend(v *Value) bool { } v.reset(OpARMLoweredPanicExtendB) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -16424,10 +15661,7 @@ func rewriteValueARM_OpPanicExtend(v *Value) bool { } v.reset(OpARMLoweredPanicExtendC) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } return false @@ -16448,17 +15682,14 @@ func rewriteValueARM_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt 
v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -16486,11 +15717,10 @@ func rewriteValueARM_OpRotateLeft32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type) v0.AuxInt = 0 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -16510,17 +15740,14 @@ func rewriteValueARM_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -16540,17 +15767,15 @@ func rewriteValueARM_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v3.AuxInt = 256 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -16569,13 +15794,11 @@ func rewriteValueARM_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, 
OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v2.AddArg(y) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -16634,10 +15857,9 @@ func rewriteValueARM_OpRsh16Ux8(v *Value) bool { v.reset(OpARMSRL) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -16654,16 +15876,14 @@ func rewriteValueARM_OpRsh16x16(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -16680,12 +15900,10 @@ func rewriteValueARM_OpRsh16x32(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg3(v0, y, v1) return true } } @@ -16749,10 +15967,9 @@ func rewriteValueARM_OpRsh16x8(v *Value) bool { v.reset(OpARMSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -16769,17 +15986,15 @@ func rewriteValueARM_OpRsh32Ux16(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ 
-16795,13 +16010,11 @@ func rewriteValueARM_OpRsh32Ux32(v *Value) bool { v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -16853,10 +16066,9 @@ func rewriteValueARM_OpRsh32Ux8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRL) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -16871,16 +16083,14 @@ func rewriteValueARM_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRAcond) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg3(x, v0, v1) return true } } @@ -16894,12 +16104,10 @@ func rewriteValueARM_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRAcond) - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = 256 v0.AddArg(y) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } } @@ -16953,10 +16161,9 @@ func rewriteValueARM_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -16975,17 +16182,15 @@ func rewriteValueARM_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v3.AuxInt = 256 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) 
v3.AddArg(v4) - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -17004,13 +16209,11 @@ func rewriteValueARM_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v2.AddArg(y) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -17069,10 +16272,9 @@ func rewriteValueARM_OpRsh8Ux8(v *Value) bool { v.reset(OpARMSRL) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -17089,16 +16291,14 @@ func rewriteValueARM_OpRsh8x16(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -17115,12 +16315,10 @@ func rewriteValueARM_OpRsh8x32(v *Value) bool { v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) - v.AddArg(v1) + v.AddArg3(v0, y, v1) return true } } @@ -17184,10 +16382,9 @@ func rewriteValueARM_OpRsh8x8(v *Value) bool { v.reset(OpARMSRA) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -17362,9 +16559,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, 
mem) return true } // match: (Store {t} ptr val mem) @@ -17379,9 +16574,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17396,9 +16589,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17413,9 +16604,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -17430,9 +16619,7 @@ func rewriteValueARM_OpStore(v *Value) bool { break } v.reset(OpARMMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -17464,11 +16651,9 @@ func rewriteValueARM_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARMMOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) @@ -17485,11 +16670,9 @@ func rewriteValueARM_OpZero(v *Value) bool { break } v.reset(OpARMMOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -17502,18 +16685,14 @@ func rewriteValueARM_OpZero(v *Value) bool { mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 1 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return 
true } // match: (Zero [4] {t} ptr mem) @@ -17530,11 +16709,9 @@ func rewriteValueARM_OpZero(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) @@ -17552,18 +16729,14 @@ func rewriteValueARM_OpZero(v *Value) bool { } v.reset(OpARMMOVHstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) @@ -17576,32 +16749,24 @@ func rewriteValueARM_OpZero(v *Value) bool { mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 3 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [3] ptr mem) @@ -17614,25 +16779,19 @@ func rewriteValueARM_OpZero(v *Value) bool { mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) @@ -17648,11 +16807,9 @@ func rewriteValueARM_OpZero(v *Value) bool { } v.reset(OpARMDUFFZERO) v.AuxInt = 4 * (128 - s/4) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [s] {t} ptr mem) @@ -17668,15 +16825,12 @@ func rewriteValueARM_OpZero(v *Value) bool { } v.reset(OpARMLoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(ptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(ptr, v0, v1, mem) return true } return false @@ -17693,8 +16847,7 @@ func rewriteValueARM_OpZeromask(v *Value) bool { v.AuxInt = 31 v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -17764,8 +16917,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -17789,11 +16941,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) 
b.AddControl(v0) return true } @@ -17842,8 +16992,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -17868,8 +17017,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -17894,8 +17042,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -17919,9 +17066,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -17945,9 +17090,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -17971,9 +17114,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18000,8 +17141,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18027,11 +17167,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) 
return true } @@ -18080,8 +17218,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18106,8 +17243,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18132,8 +17268,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18157,9 +17292,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18183,9 +17316,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18209,9 +17340,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18238,8 +17367,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18290,8 +17418,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18316,8 +17443,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) 
v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18342,8 +17468,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18367,9 +17492,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18393,9 +17516,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18419,9 +17540,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18448,8 +17567,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18500,8 +17618,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18526,8 +17643,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18552,8 +17668,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - 
v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18577,9 +17692,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18603,9 +17716,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18629,9 +17740,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18696,8 +17805,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18721,11 +17829,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -18774,8 +17880,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18800,8 +17905,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18826,8 +17930,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + 
v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18851,9 +17954,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18877,9 +17978,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18903,9 +18002,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -18932,8 +18029,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -18959,11 +18055,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -19012,8 +18106,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19038,8 +18131,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19064,8 +18156,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
b.AddControl(v0) return true } @@ -19089,9 +18180,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19115,9 +18204,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19141,9 +18228,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19170,8 +18255,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19222,8 +18306,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19248,8 +18331,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19274,8 +18356,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19299,9 +18380,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19325,9 +18404,7 @@ func rewriteBlockARM(b *Block) 
bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19351,9 +18428,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19380,8 +18455,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19432,8 +18506,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19458,8 +18531,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19484,8 +18556,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19509,9 +18580,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19535,9 +18604,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19561,9 +18628,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19629,8 +18694,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19654,11 +18718,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -19707,8 +18769,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19733,8 +18794,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19759,8 +18819,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19784,9 +18843,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19810,9 +18867,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19836,9 +18891,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) 
- v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -19865,8 +18918,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19917,8 +18969,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19943,8 +18994,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19969,8 +19019,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -19994,9 +19043,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20020,9 +19067,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20046,9 +19091,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20075,8 +19118,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20102,11 
+19144,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -20155,8 +19195,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20181,8 +19220,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20207,8 +19245,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20232,9 +19269,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20258,9 +19293,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20284,9 +19317,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20313,8 +19344,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20365,8 +19395,7 @@ func 
rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20391,8 +19420,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20417,8 +19445,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20442,9 +19469,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20468,9 +19493,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20494,9 +19517,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20663,8 +19684,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20688,11 +19708,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -20741,8 +19759,7 @@ func rewriteBlockARM(b 
*Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20767,8 +19784,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20793,8 +19809,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20818,9 +19833,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20844,9 +19857,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20870,9 +19881,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -20899,8 +19908,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -20926,11 +19934,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -20979,8 +19985,7 @@ func rewriteBlockARM(b *Block) bool { 
b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21005,8 +20010,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21031,8 +20035,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21056,9 +20059,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21082,9 +20083,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21108,9 +20107,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21137,8 +20134,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21189,8 +20185,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21215,8 +20210,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c 
- v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21241,8 +20235,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21266,9 +20259,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21292,9 +20283,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21318,9 +20307,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21347,8 +20334,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21399,8 +20385,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21425,8 +20410,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21451,8 +20435,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21476,9 
+20459,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21502,9 +20483,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21528,9 +20507,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21596,8 +20573,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21621,11 +20597,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -21674,8 +20648,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21700,8 +20673,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21726,8 +20698,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21751,9 +20722,7 @@ func 
rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21777,9 +20746,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21803,9 +20770,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -21832,8 +20797,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21859,11 +20823,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -21912,8 +20874,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21938,8 +20899,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21964,8 +20924,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -21989,9 +20948,7 @@ func rewriteBlockARM(b 
*Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22015,9 +20972,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22041,9 +20996,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22070,8 +21023,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22122,8 +21074,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22148,8 +21099,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22174,8 +21124,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22199,9 +21148,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22225,9 +21172,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22251,9 +21196,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22280,8 +21223,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22332,8 +21274,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22358,8 +21299,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22384,8 +21324,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22409,9 +21348,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22435,9 +21372,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22461,9 +21396,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) 
b.AddControl(v0) return true } @@ -22687,8 +21620,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22712,11 +21644,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -22765,8 +21695,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22791,8 +21720,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22817,8 +21745,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22842,9 +21769,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22868,9 +21793,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -22894,9 +21817,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) 
return true } @@ -22923,8 +21844,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -22950,11 +21870,9 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -23003,8 +21921,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23029,8 +21946,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23055,8 +21971,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23080,9 +21995,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23106,9 +22019,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23132,9 +22043,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ 
-23161,8 +22070,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23213,8 +22121,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23239,8 +22146,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23265,8 +22171,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23290,9 +22195,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23316,9 +22219,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23342,9 +22243,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23371,8 +22270,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23423,8 +22321,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, 
OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23449,8 +22346,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23475,8 +22371,7 @@ func rewriteBlockARM(b *Block) bool { b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -23500,9 +22395,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23526,9 +22419,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } @@ -23552,9 +22443,7 @@ func rewriteBlockARM(b *Block) bool { } b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) + v0.AddArg3(x, y, z) b.AddControl(v0) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 51051b93b7..dd3a8b922b 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -1094,9 +1094,7 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { } c := v_2_0_0.Args[0] v.reset(OpARM64ADCSflags) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.AddArg3(x, y, c) return true } // match: (ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) @@ -1116,8 +1114,7 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { break } v.reset(OpARM64ADDSflags) - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -1159,9 +1156,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MADD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1182,9 +1177,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MSUB) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1205,9 +1198,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MADDW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1228,9 +1219,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64MSUBW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -1245,8 +1234,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64SUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1268,8 +1256,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1291,8 +1278,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } v.reset(OpARM64ADDshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1314,8 +1300,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } v.reset(OpARM64ADDshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1383,10 +1368,9 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -1454,8 +1438,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1524,10 +1507,9 @@ func rewriteValueARM64_OpARM64ADD(v 
*Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -1599,8 +1581,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1775,8 +1756,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: (ADDshiftLL [c] (UBFX [bfc] x) x2) @@ -1796,8 +1776,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { } v.reset(OpARM64EXTRWconst) v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } return false @@ -1951,8 +1930,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64BIC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -1974,8 +1952,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } v.reset(OpARM64ANDshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -1997,8 +1974,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } v.reset(OpARM64ANDshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2020,8 +1996,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { } v.reset(OpARM64ANDshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2366,8 +2341,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { } v.reset(OpARM64BICshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(SRLconst [c] y)) @@ -2386,8 +2360,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { } v.reset(OpARM64BICshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(SRAconst [c] y)) @@ -2406,8 +2379,7 @@ func rewriteValueARM64_OpARM64BIC(v 
*Value) bool { } v.reset(OpARM64BICshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -2556,8 +2528,7 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool { } v.reset(OpARM64CMNshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2579,8 +2550,7 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool { } v.reset(OpARM64CMNshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2602,8 +2572,7 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool { } v.reset(OpARM64CMNshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -2943,8 +2912,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -2964,8 +2932,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64CMPshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (CMP x0:(SLLconst [c] y) x1) @@ -2985,8 +2952,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) + v0.AddArg2(x1, y) v.AddArg(v0) return true } @@ -3006,8 +2972,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64CMPshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (CMP x0:(SRLconst [c] y) x1) @@ -3027,8 +2992,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) + v0.AddArg2(x1, y) v.AddArg(v0) return true } @@ -3048,8 +3012,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { } v.reset(OpARM64CMPshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + 
v.AddArg2(x0, y) return true } // match: (CMP x0:(SRAconst [c] y) x1) @@ -3069,8 +3032,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags) v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) + v0.AddArg2(x1, y) v.AddArg(v0) return true } @@ -3119,8 +3081,7 @@ func rewriteValueARM64_OpARM64CMPW(v *Value) bool { } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -3501,8 +3462,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { flag := v_2 v.reset(OpARM64CSEL0) v.Aux = cc - v.AddArg(x) - v.AddArg(flag) + v.AddArg2(x, flag) return true } // match: (CSEL {cc} (MOVDconst [0]) y flag) @@ -3516,8 +3476,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { flag := v_2 v.reset(OpARM64CSEL0) v.Aux = arm64Negate(cc.(Op)) - v.AddArg(y) - v.AddArg(flag) + v.AddArg2(y, flag) return true } // match: (CSEL {cc} x y (InvertFlags cmp)) @@ -3532,9 +3491,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { cmp := v_2.Args[0] v.reset(OpARM64CSEL) v.Aux = arm64Invert(cc.(Op)) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) + v.AddArg3(x, y, cmp) return true } // match: (CSEL {cc} x _ flag) @@ -3583,9 +3540,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = boolval.Op - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(boolval)) + v.AddArg3(x, y, flagArg(boolval)) return true } // match: (CSEL {cc} x y (CMPWconst [0] boolval)) @@ -3604,9 +3559,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = arm64Negate(boolval.Op) - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(boolval)) + v.AddArg3(x, y, flagArg(boolval)) return true } return false @@ -3625,8 +3578,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { cmp := v_1.Args[0] v.reset(OpARM64CSEL0) v.Aux = arm64Invert(cc.(Op)) - v.AddArg(x) - v.AddArg(cmp) + 
v.AddArg2(x, cmp) return true } // match: (CSEL0 {cc} x flag) @@ -3672,8 +3624,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { } v.reset(OpARM64CSEL0) v.Aux = boolval.Op - v.AddArg(x) - v.AddArg(flagArg(boolval)) + v.AddArg2(x, flagArg(boolval)) return true } // match: (CSEL0 {cc} x (CMPWconst [0] boolval)) @@ -3691,8 +3642,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { } v.reset(OpARM64CSEL0) v.Aux = arm64Negate(boolval.Op) - v.AddArg(x) - v.AddArg(flagArg(boolval)) + v.AddArg2(x, flagArg(boolval)) return true } return false @@ -3780,8 +3730,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { } v.reset(OpARM64EONshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (EON x0 x1:(SRLconst [c] y)) @@ -3800,8 +3749,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { } v.reset(OpARM64EONshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (EON x0 x1:(SRAconst [c] y)) @@ -3820,8 +3768,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { } v.reset(OpARM64EONshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -4013,9 +3960,7 @@ func rewriteValueARM64_OpARM64FADDD(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4031,9 +3976,7 @@ func rewriteValueARM64_OpARM64FADDD(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4054,9 +3997,7 @@ func rewriteValueARM64_OpARM64FADDS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } break @@ -4072,9 +4013,7 @@ func rewriteValueARM64_OpARM64FADDS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg3(a, x, y) return true } break @@ -4226,8 +4165,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { v.reset(OpARM64FMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) @@ -4246,9 +4184,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { break } v.reset(OpARM64FMOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -4270,8 +4206,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { v.reset(OpARM64FMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4291,8 +4226,7 @@ func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDloadidx (MOVDconst [c]) ptr mem) @@ -4306,8 +4240,7 @@ func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4332,9 +4265,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -4356,9 +4287,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.reset(OpARM64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -4378,10 +4307,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { break } v.reset(OpARM64FMOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) 
- v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -4404,9 +4330,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.reset(OpARM64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4428,9 +4352,7 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem) @@ -4445,9 +4367,7 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVDstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } return false @@ -4493,8 +4413,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { v.reset(OpARM64FMOVSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off] {sym} (ADD ptr idx) mem) @@ -4513,9 +4432,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { break } v.reset(OpARM64FMOVSloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -4537,8 +4454,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { v.reset(OpARM64FMOVSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4558,8 +4474,7 @@ func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVSload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSloadidx (MOVDconst [c]) ptr mem) @@ -4573,8 +4488,7 @@ func 
rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64FMOVSload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4599,9 +4513,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -4623,9 +4535,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.reset(OpARM64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) @@ -4645,10 +4555,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { break } v.reset(OpARM64FMOVSstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -4671,9 +4578,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.reset(OpARM64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4695,9 +4600,7 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVSstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem) @@ -4712,9 +4615,7 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64FMOVSstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } return false @@ -4732,8 +4633,7 @@ func rewriteValueARM64_OpARM64FMULD(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) + 
v.AddArg2(x, y) return true } break @@ -4753,8 +4653,7 @@ func rewriteValueARM64_OpARM64FMULS(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4772,8 +4671,7 @@ func rewriteValueARM64_OpARM64FNEGD(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (FNEGD (FNMULD x y)) @@ -4785,8 +4683,7 @@ func rewriteValueARM64_OpARM64FNEGD(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4802,8 +4699,7 @@ func rewriteValueARM64_OpARM64FNEGS(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (FNEGS (FNMULS x y)) @@ -4815,8 +4711,7 @@ func rewriteValueARM64_OpARM64FNEGS(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -4834,8 +4729,7 @@ func rewriteValueARM64_OpARM64FNMULD(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4855,8 +4749,7 @@ func rewriteValueARM64_OpARM64FNMULS(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4876,9 +4769,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBD (FMULD x y) a) @@ -4891,9 +4782,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBD a (FNMULD x y)) @@ -4906,9 +4795,7 @@ func rewriteValueARM64_OpARM64FSUBD(v 
*Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBD (FNMULD x y) a) @@ -4921,9 +4808,7 @@ func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -4941,9 +4826,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBS (FMULS x y) a) @@ -4956,9 +4839,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBS a (FNMULS x y)) @@ -4971,9 +4852,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { y := v_1.Args[1] x := v_1.Args[0] v.reset(OpARM64FMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (FSUBS (FNMULS x y) a) @@ -4986,9 +4865,7 @@ func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { x := v_0.Args[0] a := v_1 v.reset(OpARM64FNMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } return false @@ -5587,8 +5464,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a _ (MOVDconst [0])) @@ -5612,8 +5488,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a x (MOVDconst [c])) @@ -5631,8 +5506,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a x (MOVDconst [c])) @@ -5649,12 +5523,10 @@ func 
rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5671,12 +5543,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5694,12 +5564,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5717,12 +5585,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5740,12 +5606,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) @@ -5763,12 +5627,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [-1]) x) @@ 
-5780,8 +5642,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } x := v_2 v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [0]) _) @@ -5805,8 +5666,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } x := v_2 v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5824,8 +5684,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5842,12 +5701,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5864,12 +5721,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5887,12 +5742,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5910,12 +5763,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5933,12 +5784,10 @@ func 
rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) @@ -5956,12 +5805,10 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADD (MOVDconst [c]) x y) @@ -5976,8 +5823,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6019,8 +5865,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a _ (MOVDconst [c])) @@ -6054,8 +5899,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6073,8 +5917,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6091,12 +5934,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6113,12 +5954,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, 
OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6136,12 +5975,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6159,12 +5996,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6182,12 +6017,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6205,12 +6038,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6227,8 +6058,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a (MOVDconst [c]) _) @@ -6262,8 +6092,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6281,8 +6110,7 @@ func 
rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6299,12 +6127,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6321,12 +6147,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6344,12 +6168,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6367,12 +6189,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6390,12 +6210,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6413,12 +6231,10 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) 
v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MADDW (MOVDconst [c]) x y) @@ -6433,8 +6249,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6541,8 +6356,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6566,8 +6380,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v0.AuxInt = log2(c + 1) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -6591,8 +6404,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6616,8 +6428,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -6642,8 +6453,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v.AuxInt = log2(c / 7) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6667,8 +6477,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -6794,8 +6603,7 @@ func rewriteValueARM64_OpARM64MNEGW(v 
*Value) bool { v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6819,8 +6627,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v0.AuxInt = log2(c + 1) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -6844,8 +6651,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6869,8 +6675,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v0.AuxInt = log2(c / 5) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -6895,8 +6700,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v.AuxInt = log2(c / 7) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -6920,8 +6724,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v0.AuxInt = log2(c / 9) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) + v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -7011,8 +6814,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { v.reset(OpARM64MOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} (ADD ptr idx) mem) @@ -7031,9 +6833,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { break } v.reset(OpARM64MOVBUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -7055,8 +6855,7 @@ func 
rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { v.reset(OpARM64MOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) @@ -7110,8 +6909,7 @@ func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) @@ -7125,8 +6923,7 @@ func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) @@ -7282,8 +7079,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { v.reset(OpARM64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} (ADD ptr idx) mem) @@ -7302,9 +7098,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { break } v.reset(OpARM64MOVBloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -7326,8 +7120,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { v.reset(OpARM64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) @@ -7368,8 +7161,7 @@ func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx (MOVDconst [c]) ptr mem) @@ -7383,8 +7175,7 @@ func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBload) v.AuxInt = c - 
v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) @@ -7500,9 +7291,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) @@ -7522,10 +7311,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { break } v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -7548,9 +7334,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -7566,8 +7350,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -7584,9 +7367,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -7603,9 +7384,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -7622,9 +7401,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + 
v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -7641,9 +7418,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -7660,9 +7435,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) @@ -7679,9 +7452,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) @@ -7707,9 +7478,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7744,10 +7513,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7775,9 +7541,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7812,10 +7576,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - 
v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7843,9 +7604,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7880,10 +7639,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7915,9 +7671,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem)) @@ -7956,10 +7710,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -7989,9 +7740,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem)) @@ -8028,10 +7777,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -8065,9 +7811,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: 
(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem)) @@ -8108,10 +7852,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -8149,9 +7890,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) @@ -8196,10 +7935,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -8299,11 +8035,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = i - 7 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem)))))))) @@ -8407,12 +8141,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8464,11 +8195,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 3 v.Aux = 
s - v.AddArg(ptr) v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem)))) @@ -8524,12 +8253,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8593,11 +8319,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem)))) @@ -8665,12 +8389,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8722,11 +8443,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 3 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem)))) @@ -8782,12 +8501,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v 
*Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8815,11 +8531,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem)) @@ -8852,12 +8566,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8885,11 +8596,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem)) @@ -8922,12 +8631,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -8959,11 +8665,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) @@ -9000,12 
+8704,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -9033,11 +8734,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem)) @@ -9070,12 +8769,9 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { continue } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr0, idx0, v0, mem) return true } break @@ -9100,9 +8796,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) @@ -9117,9 +8811,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVBstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) @@ -9132,9 +8824,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVBstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) @@ -9148,10 +8838,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + 
v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) @@ -9165,10 +8852,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) @@ -9182,10 +8866,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) @@ -9199,10 +8880,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) @@ -9216,10 +8894,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem) @@ -9233,10 +8908,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem)) @@ -9261,10 +8933,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] 
idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem)))) @@ -9322,12 +8991,9 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem)))) @@ -9386,10 +9052,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem)) @@ -9415,12 +9078,9 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem)) @@ -9447,10 +9107,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } return false @@ -9478,8 +9135,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { v.reset(OpARM64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -9501,8 +9157,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { 
v.reset(OpARM64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem) @@ -9521,9 +9176,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { break } v.reset(OpARM64MOVBstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem)) @@ -9549,8 +9202,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem)) @@ -9581,9 +9233,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { continue } v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) + v.AddArg3(ptr1, idx1, mem) return true } break @@ -9605,8 +9255,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) @@ -9620,8 +9269,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem)) @@ -9642,9 +9290,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { break } v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -9690,8 +9336,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { v.reset(OpARM64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return 
true } // match: (MOVDload [off] {sym} (ADD ptr idx) mem) @@ -9710,9 +9355,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { break } v.reset(OpARM64MOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) @@ -9731,9 +9374,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { break } v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -9755,8 +9396,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { v.reset(OpARM64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) @@ -9810,8 +9450,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx (MOVDconst [c]) ptr mem) @@ -9825,8 +9464,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) @@ -9839,9 +9477,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDloadidx (SLLconst [3] idx) ptr mem) @@ -9854,9 +9490,7 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) @@ -9895,8 +9529,7 @@ func 
rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) @@ -9967,9 +9600,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64FMOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -9991,9 +9622,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -10013,10 +9642,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) @@ -10036,10 +9662,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -10062,9 +9685,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -10080,8 +9701,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10103,9 +9723,7 @@ func 
rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx (MOVDconst [c]) idx val mem) @@ -10120,9 +9738,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) @@ -10136,10 +9752,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem) @@ -10153,10 +9766,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) @@ -10169,9 +9779,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10193,9 +9801,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool { mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) @@ -10208,9 +9814,7 @@ func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10238,8 +9842,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = 
off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -10261,8 +9864,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem) @@ -10281,9 +9883,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) @@ -10302,9 +9902,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem)) @@ -10330,8 +9928,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem)) @@ -10365,8 +9962,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = 0 v.Aux = s - v.AddArg(p0) - v.AddArg(mem) + v.AddArg2(p0, mem) return true } break @@ -10398,8 +9994,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = 0 v.Aux = s - v.AddArg(p0) - v.AddArg(mem) + v.AddArg2(p0, mem) return true } return false @@ -10419,8 +10014,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstorezeroidx 
(MOVDconst [c]) idx mem) @@ -10434,8 +10028,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) @@ -10448,9 +10041,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) @@ -10463,9 +10054,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10485,8 +10074,7 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10514,8 +10102,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { v.reset(OpARM64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} (ADD ptr idx) mem) @@ -10534,9 +10121,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { break } v.reset(OpARM64MOVHUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) @@ -10555,9 +10140,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { break } v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -10579,8 +10162,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { v.reset(OpARM64MOVHUload) v.AuxInt = off1 + 
off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) @@ -10634,8 +10216,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) @@ -10649,8 +10230,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) @@ -10663,9 +10243,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx ptr (ADD idx idx) mem) @@ -10681,9 +10259,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { } mem := v_2 v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx (ADD idx idx) ptr mem) @@ -10699,9 +10275,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) @@ -10740,8 +10314,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) @@ -10932,8 +10505,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { v.reset(OpARM64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // 
match: (MOVHload [off] {sym} (ADD ptr idx) mem) @@ -10952,9 +10524,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { break } v.reset(OpARM64MOVHloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) @@ -10973,9 +10543,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { break } v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -10997,8 +10565,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { v.reset(OpARM64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) @@ -11039,8 +10606,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVDconst [c]) ptr mem) @@ -11054,8 +10620,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) @@ -11068,9 +10633,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx ptr (ADD idx idx) mem) @@ -11086,9 +10649,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { } mem := v_2 v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx (ADD idx idx) ptr mem) @@ -11104,9 +10665,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) 
bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) @@ -11145,8 +10704,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) @@ -11344,9 +10902,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) @@ -11366,10 +10922,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) @@ -11389,10 +10942,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -11415,9 +10965,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -11433,8 +10981,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -11451,9 +10998,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v 
*Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -11470,9 +11015,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -11489,9 +11032,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) @@ -11508,9 +11049,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) @@ -11536,9 +11075,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem)) @@ -11573,10 +11110,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -11609,13 +11143,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) 
x:(MOVHstore [i-2] {s} ptr1 w mem)) @@ -11641,9 +11172,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) @@ -11678,10 +11207,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -11714,13 +11240,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem)) @@ -11750,9 +11273,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem)) @@ -11791,10 +11312,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -11831,13 +11349,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem)) @@ 
-11865,9 +11380,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem)) @@ -11904,10 +11417,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { continue } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -11942,13 +11452,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w0, mem) return true } return false @@ -11970,9 +11477,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx (MOVDconst [c]) idx val mem) @@ -11987,9 +11492,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem) @@ -12003,10 +11506,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx ptr (ADD idx idx) val mem) @@ -12023,10 +11523,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true 
} // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem) @@ -12040,10 +11537,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx (ADD idx idx) ptr val mem) @@ -12060,10 +11554,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem) @@ -12076,9 +11567,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) @@ -12092,10 +11581,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem) @@ -12109,10 +11595,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) @@ -12126,10 +11609,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem) @@ -12143,10 +11623,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem)) @@ -12171,10 +11648,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } return false @@ -12196,9 +11670,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) @@ -12211,9 +11683,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem) @@ -12227,10 +11697,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) @@ -12244,10 +11711,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem) @@ -12261,10 +11725,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) @@ -12278,10 +11739,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { x := v_2.Args[0] mem := v_3 
v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -12309,8 +11767,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -12332,8 +11789,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { v.reset(OpARM64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem) @@ -12352,9 +11808,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { break } v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) @@ -12373,9 +11827,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { break } v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem)) @@ -12401,8 +11853,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem)) @@ -12433,9 +11884,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { continue } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) + v.AddArg3(ptr1, idx1, mem) return true } break @@ -12464,12 +11913,10 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr1) v0 := 
b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr1, v0, mem) return true } return false @@ -12489,8 +11936,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem) @@ -12504,8 +11950,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem) @@ -12518,9 +11963,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx ptr (ADD idx idx) mem) @@ -12536,9 +11979,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { } mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem) @@ -12551,9 +11992,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx (ADD idx idx) ptr mem) @@ -12569,9 +12008,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem)) @@ -12592,9 +12029,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx) - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -12614,8 +12049,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -12643,8 +12077,7 @@ func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -12666,8 +12099,7 @@ func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -12713,8 +12145,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { v.reset(OpARM64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off] {sym} (ADD ptr idx) mem) @@ -12733,9 +12164,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { break } v.reset(OpARM64MOVWUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) @@ -12754,9 +12183,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { break } v.reset(OpARM64MOVWUloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -12778,8 +12205,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { v.reset(OpARM64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) @@ -12833,8 +12259,7 
@@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUloadidx (MOVDconst [c]) ptr mem) @@ -12848,8 +12273,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem) @@ -12862,9 +12286,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWUloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem) @@ -12877,9 +12299,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVWUloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) @@ -12918,8 +12338,7 @@ func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) @@ -13157,8 +12576,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { v.reset(OpARM64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} (ADD ptr idx) mem) @@ -13177,9 +12595,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { break } v.reset(OpARM64MOVWloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) @@ -13198,9 +12614,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { break } v.reset(OpARM64MOVWloadidx4) - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -13222,8 +12636,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { v.reset(OpARM64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) @@ -13264,8 +12677,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx (MOVDconst [c]) ptr mem) @@ -13279,8 +12691,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx ptr (SLLconst [2] idx) mem) @@ -13293,9 +12704,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx (SLLconst [2] idx) ptr mem) @@ -13308,9 +12717,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVWloadidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) @@ -13349,8 +12756,7 @@ func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) @@ -13626,9 +13032,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64FMOVSstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } 
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -13650,9 +13054,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) @@ -13672,10 +13074,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) @@ -13695,10 +13094,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) @@ -13721,9 +13117,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -13739,8 +13133,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -13757,9 +13150,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) @@ -13776,9 +13167,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) 
return true } // match: (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem)) @@ -13804,9 +13193,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(ptr0, w, mem) return true } // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem)) @@ -13841,10 +13228,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { continue } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w, mem) return true } break @@ -13877,13 +13261,10 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 2 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w, mem) return true } // match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem)) @@ -13911,9 +13292,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { v.reset(OpARM64MOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(ptr0, w0, mem) return true } // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem)) @@ -13950,10 +13329,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { continue } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, idx1, w0, mem) return true } break @@ -13988,13 +13364,10 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 2 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(ptr1, v0, w0, mem) return true } return false @@ -14016,9 +13389,7 
@@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx (MOVDconst [c]) idx val mem) @@ -14033,9 +13404,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(idx, val, mem) return true } // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem) @@ -14049,10 +13418,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem) @@ -14066,10 +13432,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { val := v_2 mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem) @@ -14082,9 +13445,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreidx ptr idx (MOVWreg x) mem) @@ -14098,10 +13459,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem) @@ -14115,10 +13473,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx ptr (ADDconst [4] idx) 
(SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem)) @@ -14143,10 +13498,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { break } v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(ptr, idx, w, mem) return true } return false @@ -14168,9 +13520,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) @@ -14183,9 +13533,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { } mem := v_3 v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem) @@ -14199,10 +13547,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) @@ -14216,10 +13561,7 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool { x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -14247,8 +13589,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) @@ -14270,8 +13611,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { v.reset(OpARM64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem) @@ 
-14290,9 +13630,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) @@ -14311,9 +13649,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { break } v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem)) @@ -14339,8 +13675,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { v.reset(OpARM64MOVDstorezero) v.AuxInt = min(i, j) v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) + v.AddArg2(ptr0, mem) return true } // match: (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem)) @@ -14371,9 +13706,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { continue } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) + v.AddArg3(ptr1, idx1, mem) return true } break @@ -14402,12 +13735,10 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 2 v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr1, v0, mem) return true } return false @@ -14427,8 +13758,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem) @@ -14442,8 +13772,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) + v.AddArg2(idx, mem) return true } // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem) @@ -14456,9 +13785,7 @@ func 
rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem) @@ -14471,9 +13798,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { ptr := v_1 mem := v_2 v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem)) @@ -14494,9 +13819,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool { break } v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -14516,8 +13839,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -14536,8 +13858,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a _ (MOVDconst [0])) @@ -14561,8 +13882,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14580,8 +13900,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14598,12 +13917,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) 
@@ -14620,12 +13937,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14643,12 +13958,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14666,12 +13979,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14689,12 +14000,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a x (MOVDconst [c])) @@ -14712,12 +14021,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [-1]) x) @@ -14729,8 +14036,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } x := v_2 v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a (MOVDconst [0]) _) @@ -14754,8 +14060,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } x := v_2 
v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14773,8 +14078,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14791,12 +14095,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14813,12 +14115,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14836,12 +14136,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14859,12 +14157,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB a (MOVDconst [c]) x) @@ -14882,12 +14178,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // 
match: (MSUB a (MOVDconst [c]) x) @@ -14905,12 +14199,10 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUB (MOVDconst [c]) x y) @@ -14925,8 +14217,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -14968,8 +14259,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a _ (MOVDconst [c])) @@ -15003,8 +14293,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15022,8 +14311,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15040,12 +14328,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15062,12 +14348,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15085,12 +14369,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } 
v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15108,12 +14390,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15131,12 +14411,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -15154,12 +14432,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15176,8 +14452,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a (MOVDconst [c]) _) @@ -15211,8 +14486,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15230,8 +14504,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c) - v.AddArg(a) - v.AddArg(x) + v.AddArg2(a, x) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15248,12 +14521,10 @@ func rewriteValueARM64_OpARM64MSUBW(v 
*Value) bool { break } v.reset(OpARM64SUB) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15270,12 +14541,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { break } v.reset(OpARM64ADD) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15293,12 +14562,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 3) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15316,12 +14583,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 5) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15339,12 +14604,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c / 7) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -15362,12 +14625,10 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = log2(c / 9) - v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(x, x) + v.AddArg2(a, v0) return true } // match: (MSUBW (MOVDconst [c]) x y) @@ -15382,8 
+14643,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -15420,8 +14680,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -15503,8 +14762,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -15526,8 +14784,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c + 1) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -15549,8 +14806,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15573,8 +14829,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15599,8 +14854,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v0.AuxInt = 3 v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15623,8 +14877,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15664,8 +14917,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -15762,8 +15014,7 @@ func 
rewriteValueARM64_OpARM64MULW(v *Value) bool { } v.reset(OpARM64ADDshiftLL) v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -15785,8 +15036,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c + 1) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -15808,8 +15058,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c / 3) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15832,8 +15081,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c / 5) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -15858,8 +15106,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v0.AuxInt = 3 v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15882,8 +15129,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) + v0.AddArg2(x, x) v.AddArg(v0) return true } @@ -16037,8 +15283,7 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (NEG (MULW x y)) @@ -16050,8 +15295,7 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (NEG (MOVDconst [c])) @@ -16277,8 +15521,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64ORN) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -16300,8 +15543,7 @@ func 
rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64ORshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -16323,8 +15565,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64ORshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -16346,8 +15587,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64ORshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -16415,10 +15655,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -16486,8 +15725,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -16556,10 +15794,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -16631,8 +15868,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -16657,8 +15893,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64BFI) v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } break @@ -16683,8 +15918,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } v.reset(OpARM64BFXIL) v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } break @@ -16777,8 +16011,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -16866,9 +16099,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v0 := b.NewValue0(x2.Pos, 
OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v0.AddArg3(ptr0, idx0, mem) return true } } @@ -16957,9 +16188,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } break @@ -17136,8 +16365,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -17293,9 +16521,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v0.AddArg3(ptr0, idx0, mem) return true } } @@ -17468,9 +16694,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } break @@ -17564,8 +16788,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -17655,9 +16878,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -17748,9 +16969,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -17929,8 +17148,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) 
v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -18088,9 +17306,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -18265,9 +17481,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -18318,8 +17532,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { } v.reset(OpARM64ORNshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (ORN x0 x1:(SRLconst [c] y)) @@ -18338,8 +17551,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { } v.reset(OpARM64ORNshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (ORN x0 x1:(SRAconst [c] y)) @@ -18358,8 +17570,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { } v.reset(OpARM64ORNshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -18657,8 +17868,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: ( ORshiftLL [c] (UBFX [bfc] x) x2) @@ -18678,8 +17888,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } v.reset(OpARM64EXTRWconst) v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) @@ -18701,8 +17910,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } v.reset(OpARM64BFXIL) v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) 
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) @@ -18749,8 +17957,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) @@ -18799,9 +18006,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v0.AddArg3(ptr0, idx0, mem) return true } break @@ -18845,9 +18050,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem))) @@ -18911,8 +18114,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) @@ -18975,9 +18177,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v0.AddArg3(ptr0, idx0, mem) return true } break @@ -19038,9 +18238,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // 
match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx2 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) @@ -19099,12 +18297,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type) v1.AuxInt = 1 v1.AddArg(idx0) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg3(ptr0, v1, mem) return true } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem))) @@ -19210,8 +18406,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) @@ -19308,9 +18503,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v0.AddArg3(ptr0, idx0, mem) return true } break @@ -19405,12 +18598,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type) v1.AuxInt = 2 v1.AddArg(idx0) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg3(ptr0, v1, mem) return true } // match: (ORshiftLL [56] o0:(ORshiftLL [48] 
o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) @@ -19511,9 +18702,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(ptr, idx, mem) return true } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) @@ -19559,8 +18748,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -19611,9 +18799,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -19659,9 +18845,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -19731,8 +18915,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -19801,9 +18984,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -19870,9 +19051,7 @@ func 
rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -19984,8 +19163,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) v2.AuxInt = i0 v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -20088,9 +19266,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) + v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) return true } @@ -20199,9 +19375,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) return true } @@ -20371,8 +19545,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { } v.reset(OpARM64BFI) v.AuxInt = armBFAuxInt(lc-rc, 64-lc) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) @@ -20395,8 +19568,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { } v.reset(OpARM64BFXIL) v.AuxInt = armBFAuxInt(rc-lc, 64-rc) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } return false @@ -20465,9 +19637,7 @@ func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool { } bo := v_2_0_0_0.Args[0] v.reset(OpARM64SBCSflags) - v.AddArg(x) - v.AddArg(y) - v.AddArg(bo) + v.AddArg3(x, y, bo) return true } // match: (SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) @@ -20487,8 +19657,7 @@ func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool { break } v.reset(OpARM64SUBSflags) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -21043,10 +20212,7 @@ func 
rewriteValueARM64_OpARM64STP(v *Value) bool { v.reset(OpARM64STP) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val1) - v.AddArg(val2) - v.AddArg(mem) + v.AddArg4(ptr, val1, val2, mem) return true } // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) @@ -21070,10 +20236,7 @@ func rewriteValueARM64_OpARM64STP(v *Value) bool { v.reset(OpARM64STP) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val1) - v.AddArg(val2) - v.AddArg(mem) + v.AddArg4(ptr, val1, val2, mem) return true } // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) @@ -21089,8 +20252,7 @@ func rewriteValueARM64_OpARM64STP(v *Value) bool { v.reset(OpARM64MOVQstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -21127,9 +20289,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MSUB) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB a l:(MNEG x y)) @@ -21147,9 +20307,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MADD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB a l:(MULW x y)) @@ -21167,9 +20325,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MSUBW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB a l:(MNEGW x y)) @@ -21187,9 +20343,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MADDW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(a, x, y) return true } // match: (SUB x x) @@ -21214,10 +20368,8 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { y := v_1.Args[0] v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(y) + v0.AddArg2(x, z) + v.AddArg2(v0, y) return true } // match: (SUB (SUB x y) z) @@ -21230,11 +20382,9 @@ func 
rewriteValueARM64_OpARM64SUB(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpARM64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } // match: (SUB x0 x1:(SLLconst [c] y)) @@ -21253,8 +20403,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { } v.reset(OpARM64SUBshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (SUB x0 x1:(SRLconst [c] y)) @@ -21273,8 +20422,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { } v.reset(OpARM64SUBshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } // match: (SUB x0 x1:(SRAconst [c] y)) @@ -21293,8 +20441,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { } v.reset(OpARM64SUBshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } return false @@ -21499,8 +20646,7 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { } v.reset(OpARM64TSTshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -21522,8 +20668,7 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { } v.reset(OpARM64TSTshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -21545,8 +20690,7 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { } v.reset(OpARM64TSTshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -21999,12 +21143,9 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool { y := v_1 v.reset(OpARM64MSUB) v.Type = typ.UInt64 - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg3(x, y, v0) return true } // match: (UMOD _ (MOVDconst [1])) @@ -22066,12 +21207,9 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool { y := v_1 v.reset(OpARM64MSUBW) v.Type = typ.UInt32 - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, 
OpARM64UDIVW, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg3(x, y, v0) return true } // match: (UMODW _ (MOVDconst [c])) @@ -22165,8 +21303,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } y := v_1.Args[0] v.reset(OpARM64EON) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22188,8 +21325,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } v.reset(OpARM64XORshiftLL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -22211,8 +21347,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } v.reset(OpARM64XORshiftRL) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -22234,8 +21369,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } v.reset(OpARM64XORshiftRA) v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.AddArg2(x0, y) return true } break @@ -22303,10 +21437,9 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -22374,8 +21507,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22444,10 +21576,9 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -22519,8 +21650,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { continue } v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -22693,8 +21823,7 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } // match: (XORshiftLL [c] (UBFX [bfc] x) x2) @@ -22714,8 +21843,7 @@ func 
rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { } v.reset(OpARM64EXTRWconst) v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) + v.AddArg2(x2, x) return true } return false @@ -22876,9 +22004,7 @@ func rewriteValueARM64_OpAtomicAnd8(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -22897,9 +22023,7 @@ func rewriteValueARM64_OpAtomicOr8(v *Value) bool { mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -22918,11 +22042,9 @@ func rewriteValueARM64_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpARM64SUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -22937,10 +22059,9 @@ func rewriteValueARM64_OpBitLen32(v *Value) bool { v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -22955,10 +22076,9 @@ func rewriteValueARM64_OpBitLen64(v *Value) bool { v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23011,9 +22131,7 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool { } v.reset(OpARM64CSEL) v.Aux = boolval.Op - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(boolval)) + v.AddArg3(x, y, flagArg(boolval)) return true } // match: (CondSelect x y boolval) @@ -23028,12 +22146,10 @@ func rewriteValueARM64_OpCondSelect(v 
*Value) bool { } v.reset(OpARM64CSEL) v.Aux = OpARM64NotEqual - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(boolval) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } return false @@ -23130,10 +22246,9 @@ func rewriteValueARM64_OpDiv16(v *Value) bool { v.reset(OpARM64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23150,10 +22265,9 @@ func rewriteValueARM64_OpDiv16u(v *Value) bool { v.reset(OpARM64UDIVW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23170,10 +22284,9 @@ func rewriteValueARM64_OpDiv8(v *Value) bool { v.reset(OpARM64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23190,10 +22303,9 @@ func rewriteValueARM64_OpDiv8u(v *Value) bool { v.reset(OpARM64UDIVW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -23211,10 +22323,9 @@ func rewriteValueARM64_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23230,8 +22341,7 @@ func rewriteValueARM64_OpEq32(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ 
-23247,8 +22357,7 @@ func rewriteValueARM64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23264,8 +22373,7 @@ func rewriteValueARM64_OpEq64(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23281,8 +22389,7 @@ func rewriteValueARM64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23301,10 +22408,9 @@ func rewriteValueARM64_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23322,11 +22428,9 @@ func rewriteValueARM64_OpEqB(v *Value) bool { v.reset(OpARM64XOR) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -23341,8 +22445,7 @@ func rewriteValueARM64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23358,9 +22461,7 @@ func rewriteValueARM64_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpARM64FMADDD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -23375,8 +22476,7 @@ func rewriteValueARM64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
v.AddArg(v0) return true } @@ -23392,8 +22492,7 @@ func rewriteValueARM64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23409,8 +22508,7 @@ func rewriteValueARM64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23426,8 +22524,7 @@ func rewriteValueARM64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpARM64GreaterThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23445,8 +22542,7 @@ func rewriteValueARM64_OpHmul32(v *Value) bool { v.reset(OpARM64SRAconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23464,8 +22560,7 @@ func rewriteValueARM64_OpHmul32u(v *Value) bool { v.reset(OpARM64SRAconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23481,8 +22576,7 @@ func rewriteValueARM64_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -23513,8 +22607,7 @@ func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -23533,10 +22626,9 @@ func rewriteValueARM64_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, 
OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23555,10 +22647,9 @@ func rewriteValueARM64_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23574,8 +22665,7 @@ func rewriteValueARM64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23591,8 +22681,7 @@ func rewriteValueARM64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23608,8 +22697,7 @@ func rewriteValueARM64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23625,8 +22713,7 @@ func rewriteValueARM64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23642,8 +22729,7 @@ func rewriteValueARM64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23659,8 +22745,7 @@ func rewriteValueARM64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23679,10 +22764,9 @@ func rewriteValueARM64_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, 
types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23701,10 +22785,9 @@ func rewriteValueARM64_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23723,10 +22806,9 @@ func rewriteValueARM64_OpLess16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23745,10 +22827,9 @@ func rewriteValueARM64_OpLess16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23764,8 +22845,7 @@ func rewriteValueARM64_OpLess32(v *Value) bool { y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23781,8 +22861,7 @@ func rewriteValueARM64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpARM64LessThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23798,8 +22877,7 @@ func rewriteValueARM64_OpLess32U(v *Value) bool { y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23815,8 +22893,7 @@ func 
rewriteValueARM64_OpLess64(v *Value) bool { y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23832,8 +22909,7 @@ func rewriteValueARM64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpARM64LessThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23849,8 +22925,7 @@ func rewriteValueARM64_OpLess64U(v *Value) bool { y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -23869,10 +22944,9 @@ func rewriteValueARM64_OpLess8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23891,10 +22965,9 @@ func rewriteValueARM64_OpLess8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -23913,8 +22986,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23928,8 +23000,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23943,8 +23014,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23958,8 +23028,7 @@ func 
rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23973,8 +23042,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -23988,8 +23056,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24003,8 +23070,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24018,8 +23084,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24033,8 +23098,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64FMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -24048,8 +23112,7 @@ func rewriteValueARM64_OpLoad(v *Value) bool { break } v.reset(OpARM64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -24081,20 +23144,17 @@ func rewriteValueARM64_OpLsh16x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24112,20 +23172,17 @@ func rewriteValueARM64_OpLsh16x32(v *Value) bool { 
v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24142,16 +23199,13 @@ func rewriteValueARM64_OpLsh16x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24169,20 +23223,17 @@ func rewriteValueARM64_OpLsh16x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24200,20 +23251,17 @@ func rewriteValueARM64_OpLsh32x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) 
v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24231,20 +23279,17 @@ func rewriteValueARM64_OpLsh32x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24261,16 +23306,13 @@ func rewriteValueARM64_OpLsh32x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24288,20 +23330,17 @@ func rewriteValueARM64_OpLsh32x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24319,20 +23358,17 @@ func rewriteValueARM64_OpLsh64x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) 
v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24350,20 +23386,17 @@ func rewriteValueARM64_OpLsh64x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24380,16 +23413,13 @@ func rewriteValueARM64_OpLsh64x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24407,20 +23437,17 @@ func rewriteValueARM64_OpLsh64x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24438,20 +23465,17 @@ func rewriteValueARM64_OpLsh8x16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) 
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24469,20 +23493,17 @@ func rewriteValueARM64_OpLsh8x32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24499,16 +23520,13 @@ func rewriteValueARM64_OpLsh8x64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -24526,20 +23544,17 @@ func rewriteValueARM64_OpLsh8x8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -24556,10 +23571,9 @@ 
func rewriteValueARM64_OpMod16(v *Value) bool { v.reset(OpARM64MODW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24576,10 +23590,9 @@ func rewriteValueARM64_OpMod16u(v *Value) bool { v.reset(OpARM64UMODW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24596,10 +23609,9 @@ func rewriteValueARM64_OpMod8(v *Value) bool { v.reset(OpARM64MODW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24616,10 +23628,9 @@ func rewriteValueARM64_OpMod8u(v *Value) bool { v.reset(OpARM64UMODW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -24652,12 +23663,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -24670,12 +23678,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -24688,12 +23693,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVWstore) - v.AddArg(dst) v0 := 
b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -24706,12 +23708,9 @@ func rewriteValueARM64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpARM64MOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [3] dst src mem) @@ -24725,20 +23724,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -24752,20 +23745,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -24779,20 +23766,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) v0.AuxInt = 4 - 
v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -24806,29 +23787,20 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] dst src mem) @@ -24842,20 +23814,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [16] dst src mem) @@ -24869,20 +23835,14 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 
v.reset(OpARM64MOVDstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [24] dst src mem) @@ -24896,29 +23856,20 @@ func rewriteValueARM64_OpMove(v *Value) bool { mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = 16 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -24937,17 +23888,13 @@ func rewriteValueARM64_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s - s%8 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s - s%8 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) v2.AuxInt = s - s%8 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -24963,18 +23910,13 @@ func rewriteValueARM64_OpMove(v *Value) bool { } 
v.reset(OpARM64MOVDstore) v.AuxInt = s - 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem) v1.AuxInt = 8 * (64 - (s-8)/16) - v1.AddArg(dst) - v1.AddArg(src) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, src, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -24990,9 +23932,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { } v.reset(OpARM64DUFFCOPY) v.AuxInt = 8 * (64 - s/16) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -25007,13 +23947,10 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } v.reset(OpARM64LoweredMove) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type) v0.AuxInt = s - 8 v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -25032,10 +23969,9 @@ func rewriteValueARM64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -25051,8 +23987,7 @@ func rewriteValueARM64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25068,8 +24003,7 @@ func rewriteValueARM64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25085,8 +24019,7 @@ func rewriteValueARM64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - 
v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25102,8 +24035,7 @@ func rewriteValueARM64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25122,10 +24054,9 @@ func rewriteValueARM64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -25141,8 +24072,7 @@ func rewriteValueARM64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -25158,8 +24088,7 @@ func rewriteValueARM64_OpNot(v *Value) bool { v.reset(OpARM64XOR) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -25206,9 +24135,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { } v.reset(OpARM64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -25224,9 +24151,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { } v.reset(OpARM64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -25242,9 +24167,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { } v.reset(OpARM64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -25332,17 +24255,14 @@ func rewriteValueARM64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - 
v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -25357,10 +24277,9 @@ func rewriteValueARM64_OpRotateLeft32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64RORW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -25374,10 +24293,9 @@ func rewriteValueARM64_OpRotateLeft64(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64ROR) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -25397,17 +24315,14 @@ func rewriteValueARM64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -25428,20 +24343,17 @@ func rewriteValueARM64_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25461,20 +24373,17 @@ func 
rewriteValueARM64_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25494,16 +24403,13 @@ func rewriteValueARM64_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -25523,20 +24429,17 @@ func rewriteValueARM64_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25553,22 +24456,19 @@ func rewriteValueARM64_OpRsh16x16(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) 
v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25585,22 +24485,19 @@ func rewriteValueARM64_OpRsh16x32(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25617,18 +24514,15 @@ func rewriteValueARM64_OpRsh16x64(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -25645,22 +24539,19 @@ func rewriteValueARM64_OpRsh16x8(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) 
v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25680,20 +24571,17 @@ func rewriteValueARM64_OpRsh32Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25713,20 +24601,17 @@ func rewriteValueARM64_OpRsh32Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25746,16 +24631,13 @@ func rewriteValueARM64_OpRsh32Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -25775,20 +24657,17 @@ func rewriteValueARM64_OpRsh32Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, 
OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -25805,22 +24684,19 @@ func rewriteValueARM64_OpRsh32x16(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25837,22 +24713,19 @@ func rewriteValueARM64_OpRsh32x32(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25869,18 +24742,15 @@ func rewriteValueARM64_OpRsh32x64(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt 
= 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -25897,22 +24767,19 @@ func rewriteValueARM64_OpRsh32x8(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -25930,20 +24797,17 @@ func rewriteValueARM64_OpRsh64Ux16(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -25961,20 +24825,17 @@ func rewriteValueARM64_OpRsh64Ux32(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } 
} @@ -25991,16 +24852,13 @@ func rewriteValueARM64_OpRsh64Ux64(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -26018,20 +24876,17 @@ func rewriteValueARM64_OpRsh64Ux8(v *Value) bool { v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -26046,22 +24901,19 @@ func rewriteValueARM64_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -26076,22 +24928,19 @@ func rewriteValueARM64_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v0.AddArg(v2) v3 := 
b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -26105,18 +24954,15 @@ func rewriteValueARM64_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -26131,22 +24977,19 @@ func rewriteValueARM64_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpARM64SRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.Aux = OpARM64LessThanU v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -26166,20 +25009,17 @@ func rewriteValueARM64_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -26199,20 +25039,17 @@ func rewriteValueARM64_OpRsh8Ux32(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -26232,16 +25069,13 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -26261,20 +25095,17 @@ func rewriteValueARM64_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -26291,22 +25122,19 @@ func rewriteValueARM64_OpRsh8x16(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, 
OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -26323,22 +25151,19 @@ func rewriteValueARM64_OpRsh8x32(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -26355,18 +25180,15 @@ func rewriteValueARM64_OpRsh8x64(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -26383,22 +25205,19 @@ func rewriteValueARM64_OpRsh8x8(v *Value) bool { v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.Aux = OpARM64LessThanU v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) v3.AuxInt = 63 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v4.AuxInt = 64 v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, 
v3, v4) + v.AddArg2(v0, v1) return true } } @@ -26418,14 +25237,12 @@ func rewriteValueARM64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AuxInt = -1 v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -26441,13 +25258,11 @@ func rewriteValueARM64_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AddArg(bo) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -26470,14 +25285,12 @@ func rewriteValueARM64_OpSelect1(v *Value) bool { v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v3.AuxInt = -1 v3.AddArg(c) v2.AddArg(v3) - v1.AddArg(v2) + v1.AddArg3(x, y, v2) v0.AddArg(v1) v.AddArg(v0) return true @@ -26496,13 +25309,11 @@ func rewriteValueARM64_OpSelect1(v *Value) bool { v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v4.AddArg(bo) 
v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) @@ -26542,9 +25353,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26559,9 +25368,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26576,9 +25383,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26593,9 +25398,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26610,9 +25413,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64FMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -26627,9 +25428,7 @@ func rewriteValueARM64_OpStore(v *Value) bool { break } v.reset(OpARM64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -26661,11 +25460,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -26677,11 +25474,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + 
v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] ptr mem) @@ -26693,11 +25488,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] ptr mem) @@ -26709,11 +25502,9 @@ func rewriteValueARM64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [3] ptr mem) @@ -26726,17 +25517,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [5] ptr mem) @@ -26749,17 +25536,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] ptr mem) @@ -26772,17 +25555,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(ptr) v2 
:= b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [7] ptr mem) @@ -26795,24 +25574,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 6 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [9] ptr mem) @@ -26825,17 +25598,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [10] ptr mem) @@ -26848,17 +25617,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [11] ptr mem) @@ -26871,24 +25636,18 @@ func 
rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 10 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] ptr mem) @@ -26901,17 +25660,13 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVWstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [13] ptr mem) @@ -26924,24 +25679,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 12 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [14] ptr mem) @@ -26954,24 +25703,18 @@ func rewriteValueARM64_OpZero(v 
*Value) bool { mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 12 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [15] ptr mem) @@ -26984,31 +25727,23 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 14 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AuxInt = 12 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v3.AuxInt = 8 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] ptr mem) @@ -27021,14 +25756,11 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 0 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.AddArg4(ptr, v0, v1, mem) return true } // match: (Zero [32] 
ptr mem) @@ -27041,24 +25773,18 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 16 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(ptr) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v2.AddArg(v4) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg4(ptr, v3, v4, mem) + v.AddArg4(ptr, v0, v1, v2) return true } // match: (Zero [48] ptr mem) @@ -27071,34 +25797,25 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 32 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = 16 - v2.AddArg(ptr) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v7.AuxInt = 0 - v5.AddArg(v7) - v5.AddArg(mem) - v2.AddArg(v5) - v.AddArg(v2) + v5.AddArg4(ptr, v6, v7, mem) + v2.AddArg4(ptr, v3, v4, v5) + v.AddArg4(ptr, v0, v1, v2) return true } // match: (Zero [64] ptr mem) @@ -27111,44 +25828,32 @@ func rewriteValueARM64_OpZero(v *Value) bool { mem := v_1 v.reset(OpARM64STP) v.AuxInt = 48 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := 
b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = 32 - v2.AddArg(ptr) v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v4.AuxInt = 0 - v2.AddArg(v4) v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v5.AuxInt = 16 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v7.AuxInt = 0 - v5.AddArg(v7) v8 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v8.AuxInt = 0 - v8.AddArg(ptr) v9 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v9.AuxInt = 0 - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v10.AuxInt = 0 - v8.AddArg(v10) - v8.AddArg(mem) - v5.AddArg(v8) - v2.AddArg(v5) - v.AddArg(v2) + v8.AddArg4(ptr, v9, v10, mem) + v5.AddArg4(ptr, v6, v7, v8) + v2.AddArg4(ptr, v3, v4, v5) + v.AddArg4(ptr, v0, v1, v2) return true } // match: (Zero [s] ptr mem) @@ -27166,12 +25871,10 @@ func rewriteValueARM64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) v0.AuxInt = s - 8 v0.AddArg(ptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = s - s%16 - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(ptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [s] ptr mem) @@ -27189,12 +25892,10 @@ func rewriteValueARM64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) v0.AuxInt = s - 16 v0.AddArg(ptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = s - s%16 - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(ptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [s] ptr mem) @@ -27209,8 +25910,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { } v.reset(OpARM64DUFFZERO) v.AuxInt = 4 * (64 - s/16) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Zero [s] ptr mem) @@ -27224,12 +25924,10 @@ func 
rewriteValueARM64_OpZero(v *Value) bool { break } v.reset(OpARM64LoweredZero) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) v0.AuxInt = s - 16 v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -27284,8 +25982,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27314,8 +26011,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27416,8 +26112,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27446,8 +26141,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27470,8 +26164,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27492,8 +26185,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27541,11 +26233,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -27569,11 +26259,9 @@ func rewriteBlockARM64(b *Block) bool { 
} b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -27597,11 +26285,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -27625,11 +26311,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -27794,8 +26478,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27824,8 +26507,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27926,8 +26608,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27956,8 +26637,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -27980,8 +26660,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
b.AddControl(v0) return true } @@ -28002,8 +26681,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28027,11 +26705,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28055,11 +26731,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28083,11 +26757,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28111,11 +26783,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28234,8 +26904,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28264,8 +26933,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true 
} @@ -28366,8 +27034,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28396,8 +27063,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28420,8 +27086,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28442,8 +27107,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28467,11 +27131,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28495,11 +27157,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28523,11 +27183,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28551,11 +27209,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, 
types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -28784,8 +27440,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28814,8 +27469,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28916,8 +27570,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28946,8 +27599,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28970,8 +27622,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -28992,8 +27643,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29017,11 +27667,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29045,11 +27693,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, 
types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29073,11 +27719,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29101,11 +27745,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29198,8 +27840,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29228,8 +27869,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29330,8 +27970,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29360,8 +27999,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29384,8 +28022,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29406,8 +28043,7 @@ func 
rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29431,11 +28067,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29459,11 +28093,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29487,11 +28119,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29515,11 +28145,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29639,8 +28267,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29669,8 +28296,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29771,8 +28397,7 @@ func rewriteBlockARM64(b *Block) 
bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29801,8 +28426,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29825,8 +28449,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29847,8 +28470,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -29896,11 +28518,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29924,11 +28544,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29952,11 +28570,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } @@ -29980,11 +28596,9 @@ func rewriteBlockARM64(b *Block) bool { } b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(a) v1 := b.NewValue0(v_0.Pos, 
OpARM64MULW, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) b.AddControl(v0) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index b3226cddb5..58dc71bd04 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -596,11 +596,9 @@ func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool { y := v_1 c := v_2 v.reset(OpMIPSADD) - v.AddArg(c) v0 := b.NewValue0(v.Pos, OpMIPSADD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg2(c, v0) return true } } @@ -625,39 +623,33 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32) v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(val) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v5.AuxInt = 3 v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) v6.AuxInt = 3 v6.AddArg(ptr) v5.AddArg(v6) - v3.AddArg(v5) - v2.AddArg(v3) + v3.AddArg2(v4, v5) v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32) v7.AuxInt = 0 v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v9.AuxInt = 0xff - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v10.AuxInt = 3 v11 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) v11.AuxInt = 3 v11.AddArg(ptr) v10.AddArg(v11) - v8.AddArg(v10) + v8.AddArg2(v9, v10) v7.AddArg(v8) - v2.AddArg(v7) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v7) + v.AddArg3(v0, v2, mem) return true } // match: (AtomicAnd8 ptr val mem) @@ -674,14 +666,11 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := 
b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32) v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(val) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v5.AuxInt = 3 v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) @@ -691,14 +680,12 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v7.AddArg(ptr) v6.AddArg(v7) v5.AddArg(v6) - v3.AddArg(v5) - v2.AddArg(v3) + v3.AddArg2(v4, v5) v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32) v8.AuxInt = 0 v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v10.AuxInt = 0xff - v9.AddArg(v10) v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v11.AuxInt = 3 v12 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) @@ -708,11 +695,10 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v13.AddArg(ptr) v12.AddArg(v13) v11.AddArg(v12) - v9.AddArg(v11) + v9.AddArg2(v10, v11) v8.AddArg(v9) - v2.AddArg(v8) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v8) + v.AddArg3(v0, v2, mem) return true } return false @@ -738,22 +724,18 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v3.AddArg(val) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v4.AuxInt = 3 v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) v5.AuxInt = 3 v5.AddArg(ptr) v4.AddArg(v5) - v2.AddArg(v4) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) return true } // match: (AtomicOr8 ptr val mem) @@ -770,13 +752,10 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { 
v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = ^3 - v0.AddArg(v1) - v0.AddArg(ptr) - v.AddArg(v0) + v0.AddArg2(v1, ptr) v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v3.AddArg(val) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) v4.AuxInt = 3 v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) @@ -786,9 +765,8 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { v6.AddArg(ptr) v5.AddArg(v6) v4.AddArg(v5) - v2.AddArg(v4) - v.AddArg(v2) - v.AddArg(mem) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) return true } return false @@ -807,11 +785,9 @@ func rewriteValueMIPS_OpAvg32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpMIPSSUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -827,10 +803,9 @@ func rewriteValueMIPS_OpBitLen32(v *Value) bool { v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -891,18 +866,16 @@ func rewriteValueMIPS_OpCtz32(v *Value) bool { v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t) v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t) v2.AuxInt = 1 v3 := b.NewValue0(v.Pos, OpMIPSAND, t) - v3.AddArg(x) v4 := b.NewValue0(v.Pos, OpMIPSNEG, t) v4.AddArg(x) - v3.AddArg(v4) + v3.AddArg2(x, v4) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -920,10 +893,9 @@ func rewriteValueMIPS_OpDiv16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, 
OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -942,10 +914,9 @@ func rewriteValueMIPS_OpDiv16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -962,8 +933,7 @@ func rewriteValueMIPS_OpDiv32(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -980,8 +950,7 @@ func rewriteValueMIPS_OpDiv32u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1000,10 +969,9 @@ func rewriteValueMIPS_OpDiv8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1022,10 +990,9 @@ func rewriteValueMIPS_OpDiv8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1045,10 +1012,9 @@ func rewriteValueMIPS_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } 
@@ -1066,8 +1032,7 @@ func rewriteValueMIPS_OpEq32(v *Value) bool { v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1083,8 +1048,7 @@ func rewriteValueMIPS_OpEq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1100,8 +1064,7 @@ func rewriteValueMIPS_OpEq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1121,10 +1084,9 @@ func rewriteValueMIPS_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1142,8 +1104,7 @@ func rewriteValueMIPS_OpEqB(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1161,8 +1122,7 @@ func rewriteValueMIPS_OpEqPtr(v *Value) bool { v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1178,8 +1138,7 @@ func rewriteValueMIPS_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1195,8 +1154,7 @@ func rewriteValueMIPS_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1212,8 +1170,7 @@ func 
rewriteValueMIPS_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1229,8 +1186,7 @@ func rewriteValueMIPS_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1247,8 +1203,7 @@ func rewriteValueMIPS_OpHmul32(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1265,8 +1220,7 @@ func rewriteValueMIPS_OpHmul32u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1280,8 +1234,7 @@ func rewriteValueMIPS_OpIsInBounds(v *Value) bool { idx := v_0 len := v_1 v.reset(OpMIPSSGTU) - v.AddArg(len) - v.AddArg(idx) + v.AddArg2(len, idx) return true } } @@ -1294,10 +1247,9 @@ func rewriteValueMIPS_OpIsNonNil(v *Value) bool { for { ptr := v_0 v.reset(OpMIPSSGTU) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) + v.AddArg2(ptr, v0) return true } } @@ -1314,8 +1266,7 @@ func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -1335,10 +1286,9 @@ func rewriteValueMIPS_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1358,10 +1308,9 @@ func 
rewriteValueMIPS_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1379,8 +1328,7 @@ func rewriteValueMIPS_OpLeq32(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1396,8 +1344,7 @@ func rewriteValueMIPS_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1415,8 +1362,7 @@ func rewriteValueMIPS_OpLeq32U(v *Value) bool { v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1432,8 +1378,7 @@ func rewriteValueMIPS_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1453,10 +1398,9 @@ func rewriteValueMIPS_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1476,10 +1420,9 @@ func rewriteValueMIPS_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1497,10 +1440,9 @@ func rewriteValueMIPS_OpLess16(v *Value) bool { v.reset(OpMIPSSGT) v0 := 
b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1517,10 +1459,9 @@ func rewriteValueMIPS_OpLess16U(v *Value) bool { v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1533,8 +1474,7 @@ func rewriteValueMIPS_OpLess32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSGT) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1549,8 +1489,7 @@ func rewriteValueMIPS_OpLess32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1564,8 +1503,7 @@ func rewriteValueMIPS_OpLess32U(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSGTU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1580,8 +1518,7 @@ func rewriteValueMIPS_OpLess64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1599,10 +1536,9 @@ func rewriteValueMIPS_OpLess8(v *Value) bool { v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1619,10 +1555,9 @@ func rewriteValueMIPS_OpLess8U(v *Value) bool { v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1640,8 +1575,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVBUload) - v.AddArg(ptr) - 
v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1655,8 +1589,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1670,8 +1603,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1685,8 +1617,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1700,8 +1631,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1715,8 +1645,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1730,8 +1659,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1745,8 +1673,7 @@ func rewriteValueMIPS_OpLoad(v *Value) bool { break } v.reset(OpMIPSMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1777,20 +1704,17 @@ func rewriteValueMIPS_OpLsh16x16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -1807,16 +1731,13 
@@ func rewriteValueMIPS_OpLsh16x32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -1870,20 +1791,17 @@ func rewriteValueMIPS_OpLsh16x8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -1900,20 +1818,17 @@ func rewriteValueMIPS_OpLsh32x16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -1930,16 +1845,13 @@ func rewriteValueMIPS_OpLsh32x32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -1993,20 +1905,17 @@ func 
rewriteValueMIPS_OpLsh32x8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -2023,20 +1932,17 @@ func rewriteValueMIPS_OpLsh8x16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -2053,16 +1959,13 @@ func rewriteValueMIPS_OpLsh8x32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2116,20 +2019,17 @@ func rewriteValueMIPS_OpLsh8x8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) 
v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -2162,8 +2062,7 @@ func rewriteValueMIPS_OpMIPSADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpMIPSSUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2289,8 +2188,7 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool { v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2392,8 +2290,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { } c := v_2 v.reset(OpMIPSCMOVZzero) - v.AddArg(a) - v.AddArg(c) + v.AddArg2(a, c) return true } return false @@ -2449,8 +2346,7 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool { } v.reset(OpMIPSLoweredAtomicAddconst) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2468,8 +2364,7 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool { } mem := v_2 v.reset(OpMIPSLoweredAtomicStorezero) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2496,8 +2391,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { v.reset(OpMIPSMOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -2519,8 +2413,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { v.reset(OpMIPSMOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -2595,8 +2488,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBUreg (ANDconst [c] x)) @@ -2647,8 +2539,7 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { v.reset(OpMIPSMOVBload) 
v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -2670,8 +2561,7 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { v.reset(OpMIPSMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -2746,8 +2636,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVBreg (ANDconst [c] x)) @@ -2804,9 +2693,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -2829,9 +2716,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) @@ -2847,8 +2732,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -2865,9 +2749,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -2884,9 +2766,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + 
v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -2903,9 +2783,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -2922,9 +2800,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -2941,9 +2817,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -2970,8 +2844,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool { v.reset(OpMIPSMOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -2993,8 +2866,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool { v.reset(OpMIPSMOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3021,8 +2893,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { v.reset(OpMIPSMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3044,8 +2915,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { v.reset(OpMIPSMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) @@ -3097,9 +2967,7 @@ func 
rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool { v.reset(OpMIPSMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3122,9 +2990,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool { v.reset(OpMIPSMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3151,8 +3017,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { v.reset(OpMIPSMOVFload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3174,8 +3039,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { v.reset(OpMIPSMOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) @@ -3227,9 +3091,7 @@ func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool { v.reset(OpMIPSMOVFstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3252,9 +3114,7 @@ func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool { v.reset(OpMIPSMOVFstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3281,8 +3141,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { v.reset(OpMIPSMOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3304,8 +3163,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { 
v.reset(OpMIPSMOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -3403,8 +3261,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVHUreg (ANDconst [c] x)) @@ -3455,8 +3312,7 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { v.reset(OpMIPSMOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3478,8 +3334,7 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { v.reset(OpMIPSMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) @@ -3600,8 +3455,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } // match: (MOVHreg (ANDconst [c] x)) @@ -3658,9 +3512,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3683,9 +3535,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) @@ -3701,8 +3551,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + 
v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -3719,9 +3568,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -3738,9 +3585,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -3757,9 +3602,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -3786,8 +3629,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool { v.reset(OpMIPSMOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3809,8 +3651,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool { v.reset(OpMIPSMOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3837,8 +3678,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { v.reset(OpMIPSMOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -3860,8 +3700,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { v.reset(OpMIPSMOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -3940,9 +3779,7 @@ func 
rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) @@ -3965,9 +3802,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) @@ -3983,8 +3818,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -4001,9 +3835,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { v.reset(OpMIPSMOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -4030,8 +3862,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool { v.reset(OpMIPSMOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) @@ -4053,8 +3884,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool { v.reset(OpMIPSMOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4242,8 +4072,7 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool { y := v_1.Args[0] v.reset(OpMIPSSGTUzero) v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5030,10 +4859,9 @@ func rewriteValueMIPS_OpMod16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt16to32, 
typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5052,10 +4880,9 @@ func rewriteValueMIPS_OpMod16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5072,8 +4899,7 @@ func rewriteValueMIPS_OpMod32(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5090,8 +4916,7 @@ func rewriteValueMIPS_OpMod32u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5110,10 +4935,9 @@ func rewriteValueMIPS_OpMod8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5132,10 +4956,9 @@ func rewriteValueMIPS_OpMod8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5169,12 +4992,9 @@ func rewriteValueMIPS_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpMIPSMOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + 
v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] {t} dst src mem) @@ -5192,12 +5012,9 @@ func rewriteValueMIPS_OpMove(v *Value) bool { break } v.reset(OpMIPSMOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -5211,20 +5028,14 @@ func rewriteValueMIPS_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 1 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] {t} dst src mem) @@ -5242,12 +5053,9 @@ func rewriteValueMIPS_OpMove(v *Value) bool { break } v.reset(OpMIPSMOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] {t} dst src mem) @@ -5266,20 +5074,14 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] dst src mem) @@ -5293,38 +5095,26 @@ 
func rewriteValueMIPS_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -5338,29 +5128,20 @@ func rewriteValueMIPS_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) 
@@ -5379,20 +5160,14 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5411,38 +5186,26 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v3.AuxInt = 2 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v4.AuxInt = 2 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] {t} dst src mem) @@ -5461,29 +5224,20 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) 
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] {t} dst src mem) @@ -5502,29 +5256,20 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [16] {t} dst src mem) @@ -5543,38 +5288,26 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 12 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v0.AuxInt = 12 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, 
OpMIPSMOVWstore, types.TypeMem) v3.AuxInt = 4 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) v4.AuxInt = 4 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] {t} dst src mem) @@ -5591,13 +5324,10 @@ func rewriteValueMIPS_OpMove(v *Value) bool { } v.reset(OpMIPSLoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -5616,14 +5346,12 @@ func rewriteValueMIPS_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -5639,12 +5367,10 @@ func rewriteValueMIPS_OpNeq32(v *Value) bool { y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -5659,8 +5385,7 @@ func rewriteValueMIPS_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) 
v.AddArg(v0) return true } @@ -5676,8 +5401,7 @@ func rewriteValueMIPS_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpMIPSFPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5696,14 +5420,12 @@ func rewriteValueMIPS_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -5719,12 +5441,10 @@ func rewriteValueMIPS_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -5783,9 +5503,7 @@ func rewriteValueMIPS_OpPanicBounds(v *Value) bool { } v.reset(OpMIPSLoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -5801,9 +5519,7 @@ func rewriteValueMIPS_OpPanicBounds(v *Value) bool { } v.reset(OpMIPSLoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -5819,9 +5535,7 @@ func rewriteValueMIPS_OpPanicBounds(v *Value) bool { } v.reset(OpMIPSLoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -5845,10 +5559,7 @@ func rewriteValueMIPS_OpPanicExtend(v *Value) bool { } v.reset(OpMIPSLoweredPanicExtendA) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: 
(PanicExtend [kind] hi lo y mem) @@ -5865,10 +5576,7 @@ func rewriteValueMIPS_OpPanicExtend(v *Value) bool { } v.reset(OpMIPSLoweredPanicExtendB) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } // match: (PanicExtend [kind] hi lo y mem) @@ -5885,10 +5593,7 @@ func rewriteValueMIPS_OpPanicExtend(v *Value) bool { } v.reset(OpMIPSLoweredPanicExtendC) v.AuxInt = kind - v.AddArg(hi) - v.AddArg(lo) - v.AddArg(y) - v.AddArg(mem) + v.AddArg4(hi, lo, y, mem) return true } return false @@ -5909,17 +5614,14 @@ func rewriteValueMIPS_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -5940,17 +5642,14 @@ func rewriteValueMIPS_OpRotateLeft32(v *Value) bool { c := v_1.AuxInt v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 31 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 31 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -5971,17 +5670,14 @@ func rewriteValueMIPS_OpRotateLeft64(v *Value) bool { c := v_1.AuxInt v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 63 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c 
& 63 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6002,17 +5698,14 @@ func rewriteValueMIPS_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6032,20 +5725,17 @@ func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6064,16 +5754,13 @@ func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6134,20 +5821,17 @@ func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 
:= b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6164,21 +5848,18 @@ func rewriteValueMIPS_OpRsh16x16(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6195,17 +5876,14 @@ func rewriteValueMIPS_OpRsh16x32(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -6269,21 +5947,18 @@ func rewriteValueMIPS_OpRsh16x8(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, 
v4) + v.AddArg2(v0, v1) return true } } @@ -6300,20 +5975,17 @@ func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6330,16 +6002,13 @@ func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -6393,20 +6062,17 @@ func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool { y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6421,21 +6087,18 @@ func rewriteValueMIPS_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -6450,17 +6113,14 @@ func rewriteValueMIPS_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = -1 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v2.AuxInt = 32 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -6514,21 +6174,18 @@ func rewriteValueMIPS_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPSSRA) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v4.AddArg(y) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) return true } } @@ -6547,20 +6204,17 @@ func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6579,16 +6233,13 @@ func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) - 
v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -6649,20 +6300,17 @@ func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v.AddArg(v4) + v.AddArg3(v0, v3, v4) return true } } @@ -6679,21 +6327,18 @@ func rewriteValueMIPS_OpRsh8x16(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6710,17 +6355,14 @@ func rewriteValueMIPS_OpRsh8x32(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = -1 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v3.AuxInt = 32 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -6784,21 
+6426,18 @@ func rewriteValueMIPS_OpRsh8x8(v *Value) bool { v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v3.AuxInt = -1 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) v4.AuxInt = 32 v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) return true } } @@ -6817,8 +6456,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { x := v_0.Args[0] v.reset(OpMIPSADD) v.Type = t.FieldType(0) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Select0 (Sub32carry x y)) @@ -6832,8 +6470,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { x := v_0.Args[0] v.reset(OpMIPSSUB) v.Type = t.FieldType(0) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Select0 (MULTU (MOVWconst [0]) _ )) @@ -6892,11 +6529,9 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) v0.AuxInt = -1 v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(x) + v.AddArg3(v0, v1, x) return true } break @@ -7010,11 +6645,9 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { x := v_0.Args[0] v.reset(OpMIPSSGTU) v.Type = typ.Bool - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0)) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) + v.AddArg2(x, v0) return true } // match: (Select1 (Sub32carry x y)) @@ -7029,10 +6662,8 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.reset(OpMIPSSGTU) v.Type = typ.Bool v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0)) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(x) + v0.AddArg2(x, y) + v.AddArg2(v0, x) return true } // match: 
(Select1 (MULTU (MOVWconst [0]) _ )) @@ -7234,9 +6865,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7251,9 +6880,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7268,9 +6895,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7285,9 +6910,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7302,9 +6925,7 @@ func rewriteValueMIPS_OpStore(v *Value) bool { break } v.reset(OpMIPSMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7323,10 +6944,8 @@ func rewriteValueMIPS_OpSub32withcarry(v *Value) bool { c := v_2 v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSSUB, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(c) + v0.AddArg2(x, y) + v.AddArg2(v0, c) return true } } @@ -7357,11 +6976,9 @@ func rewriteValueMIPS_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpMIPSMOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) @@ -7378,11 +6995,9 @@ func rewriteValueMIPS_OpZero(v *Value) bool { break } v.reset(OpMIPSMOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ 
-7395,18 +7010,14 @@ func rewriteValueMIPS_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 1 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) @@ -7423,11 +7034,9 @@ func rewriteValueMIPS_OpZero(v *Value) bool { break } v.reset(OpMIPSMOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) @@ -7445,18 +7054,14 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) @@ -7469,32 +7074,24 @@ func rewriteValueMIPS_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 3 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [3] ptr mem) @@ -7507,25 +7104,19 @@ func rewriteValueMIPS_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] {t} ptr mem) @@ -7543,25 +7134,19 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVHstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) @@ -7579,18 +7164,14 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) 
v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] {t} ptr mem) @@ -7608,25 +7189,19 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] {t} ptr mem) @@ -7644,32 +7219,24 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } v.reset(OpMIPSMOVWstore) v.AuxInt = 12 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v3.AuxInt = 4 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) @@ -7685,12 +7252,10 @@ func rewriteValueMIPS_OpZero(v *Value) bool { } 
v.reset(OpMIPSLoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false @@ -7705,10 +7270,9 @@ func rewriteValueMIPS_OpZeromask(v *Value) bool { x := v_0 v.reset(OpMIPSNEG) v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v1.AuxInt = 0 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 315270b16a..6736fcd560 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -706,11 +706,9 @@ func rewriteValueMIPS64_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -725,8 +723,7 @@ func rewriteValueMIPS64_OpCom16(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -741,8 +738,7 @@ func rewriteValueMIPS64_OpCom32(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -757,8 +753,7 @@ func rewriteValueMIPS64_OpCom64(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -773,8 +768,7 @@ func rewriteValueMIPS64_OpCom8(v *Value) bool { v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + 
v.AddArg2(v0, x) return true } } @@ -801,10 +795,9 @@ func rewriteValueMIPS64_OpDiv16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -823,10 +816,9 @@ func rewriteValueMIPS64_OpDiv16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -845,10 +837,9 @@ func rewriteValueMIPS64_OpDiv32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -867,10 +858,9 @@ func rewriteValueMIPS64_OpDiv32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -887,8 +877,7 @@ func rewriteValueMIPS64_OpDiv64(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -905,8 +894,7 @@ func rewriteValueMIPS64_OpDiv64u(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -925,10 
+913,9 @@ func rewriteValueMIPS64_OpDiv8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -947,10 +934,9 @@ func rewriteValueMIPS64_OpDiv8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -968,15 +954,13 @@ func rewriteValueMIPS64_OpEq16(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -993,15 +977,13 @@ func rewriteValueMIPS64_OpEq32(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1016,8 +998,7 @@ func rewriteValueMIPS64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1035,11 +1016,9 @@ func rewriteValueMIPS64_OpEq64(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, 
OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1054,8 +1033,7 @@ func rewriteValueMIPS64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1073,15 +1051,13 @@ func rewriteValueMIPS64_OpEq8(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1098,11 +1074,9 @@ func rewriteValueMIPS64_OpEqB(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1119,11 +1093,9 @@ func rewriteValueMIPS64_OpEqPtr(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1138,8 +1110,7 @@ func rewriteValueMIPS64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1155,8 +1126,7 @@ func rewriteValueMIPS64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1172,8 +1142,7 @@ func rewriteValueMIPS64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1189,8 +1158,7 @@ func rewriteValueMIPS64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1211,10 +1179,9 @@ func rewriteValueMIPS64_OpHmul32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) v.AddArg(v0) return true @@ -1236,10 +1203,9 @@ func rewriteValueMIPS64_OpHmul32u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) v.AddArg(v0) return true @@ -1257,8 +1223,7 @@ func rewriteValueMIPS64_OpHmul64(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1275,8 +1240,7 @@ func rewriteValueMIPS64_OpHmul64u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1290,8 +1254,7 @@ func rewriteValueMIPS64_OpIsInBounds(v *Value) bool { idx := v_0 len := v_1 v.reset(OpMIPS64SGTU) - v.AddArg(len) - v.AddArg(idx) + v.AddArg2(len, idx) return 
true } } @@ -1304,10 +1267,9 @@ func rewriteValueMIPS64_OpIsNonNil(v *Value) bool { for { ptr := v_0 v.reset(OpMIPS64SGTU) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) + v.AddArg2(ptr, v0) return true } } @@ -1324,11 +1286,9 @@ func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v1.AddArg(idx) - v1.AddArg(len) - v.AddArg(v1) + v1.AddArg2(idx, len) + v.AddArg2(v0, v1) return true } } @@ -1345,15 +1305,13 @@ func rewriteValueMIPS64_OpLeq16(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1370,15 +1328,13 @@ func rewriteValueMIPS64_OpLeq16U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1395,15 +1351,13 @@ func rewriteValueMIPS64_OpLeq32(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } 
@@ -1418,8 +1372,7 @@ func rewriteValueMIPS64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1437,15 +1390,13 @@ func rewriteValueMIPS64_OpLeq32U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1462,11 +1413,9 @@ func rewriteValueMIPS64_OpLeq64(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1481,8 +1430,7 @@ func rewriteValueMIPS64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1500,11 +1448,9 @@ func rewriteValueMIPS64_OpLeq64U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) return true } } @@ -1521,15 +1467,13 @@ func rewriteValueMIPS64_OpLeq8(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v3.AddArg(y) - v1.AddArg(v3) - 
v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1546,15 +1490,13 @@ func rewriteValueMIPS64_OpLeq8U(v *Value) bool { v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } } @@ -1571,10 +1513,9 @@ func rewriteValueMIPS64_OpLess16(v *Value) bool { v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1591,10 +1532,9 @@ func rewriteValueMIPS64_OpLess16U(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1611,10 +1551,9 @@ func rewriteValueMIPS64_OpLess32(v *Value) bool { v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1629,8 +1568,7 @@ func rewriteValueMIPS64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1648,10 +1586,9 @@ func rewriteValueMIPS64_OpLess32U(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1664,8 +1601,7 @@ func 
rewriteValueMIPS64_OpLess64(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SGT) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1680,8 +1616,7 @@ func rewriteValueMIPS64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1695,8 +1630,7 @@ func rewriteValueMIPS64_OpLess64U(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SGTU) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1713,10 +1647,9 @@ func rewriteValueMIPS64_OpLess8(v *Value) bool { v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1733,10 +1666,9 @@ func rewriteValueMIPS64_OpLess8U(v *Value) bool { v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1754,8 +1686,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1769,8 +1700,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1784,8 +1714,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1799,8 +1728,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1814,8 +1742,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) 
bool { break } v.reset(OpMIPS64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1829,8 +1756,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1844,8 +1770,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1859,8 +1784,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVVload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1874,8 +1798,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1889,8 +1812,7 @@ func rewriteValueMIPS64_OpLoad(v *Value) bool { break } v.reset(OpMIPS64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1924,18 +1846,15 @@ func rewriteValueMIPS64_OpLsh16x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -1955,18 +1874,15 @@ func rewriteValueMIPS64_OpLsh16x32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - 
v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -1986,14 +1902,11 @@ func rewriteValueMIPS64_OpLsh16x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2013,18 +1926,15 @@ func rewriteValueMIPS64_OpLsh16x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2044,18 +1954,15 @@ func rewriteValueMIPS64_OpLsh32x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2075,18 +1982,15 @@ func rewriteValueMIPS64_OpLsh32x32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, 
OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2106,14 +2010,11 @@ func rewriteValueMIPS64_OpLsh32x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2133,18 +2034,15 @@ func rewriteValueMIPS64_OpLsh32x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2164,18 +2062,15 @@ func rewriteValueMIPS64_OpLsh64x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2195,18 +2090,15 @@ func rewriteValueMIPS64_OpLsh64x32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, 
OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2226,14 +2118,11 @@ func rewriteValueMIPS64_OpLsh64x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2253,18 +2142,15 @@ func rewriteValueMIPS64_OpLsh64x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2284,18 +2170,15 @@ func rewriteValueMIPS64_OpLsh8x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2315,18 +2198,15 @@ func rewriteValueMIPS64_OpLsh8x32(v *Value) 
bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2346,14 +2226,11 @@ func rewriteValueMIPS64_OpLsh8x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v3.AddArg(x) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -2373,18 +2250,15 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -2421,8 +2295,7 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { } y := v_1.Args[0] v.reset(OpMIPS64SUBV) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -2617,8 +2490,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool { } v.reset(OpMIPS64LoweredAtomicAddconst32) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2642,8 +2514,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool { } v.reset(OpMIPS64LoweredAtomicAddconst64) 
v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2661,8 +2532,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool { } mem := v_2 v.reset(OpMIPS64LoweredAtomicStorezero32) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2680,8 +2550,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool { } mem := v_2 v.reset(OpMIPS64LoweredAtomicStorezero64) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2707,8 +2576,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool { v.reset(OpMIPS64MOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -2730,8 +2598,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool { v.reset(OpMIPS64MOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2795,8 +2662,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { v.reset(OpMIPS64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -2818,8 +2684,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { v.reset(OpMIPS64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2885,9 +2750,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -2910,9 +2773,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) 
v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -2928,8 +2789,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -2946,9 +2806,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) @@ -2965,9 +2823,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -2984,9 +2840,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) @@ -3003,9 +2857,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -3022,9 +2874,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) @@ -3041,9 +2891,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - 
v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -3069,8 +2917,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3092,8 +2939,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3119,8 +2965,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { v.reset(OpMIPS64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3142,8 +2987,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { v.reset(OpMIPS64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3171,9 +3015,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { v.reset(OpMIPS64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3196,9 +3038,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { v.reset(OpMIPS64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3224,8 +3064,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { v.reset(OpMIPS64MOVFload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3247,8 +3086,7 @@ func 
rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { v.reset(OpMIPS64MOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3276,9 +3114,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool { v.reset(OpMIPS64MOVFstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3301,9 +3137,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool { v.reset(OpMIPS64MOVFstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3329,8 +3163,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool { v.reset(OpMIPS64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3352,8 +3185,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool { v.reset(OpMIPS64MOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3440,8 +3272,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v.reset(OpMIPS64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3463,8 +3294,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v.reset(OpMIPS64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3576,9 +3406,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - 
v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3601,9 +3429,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -3619,8 +3445,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -3637,9 +3462,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) @@ -3656,9 +3479,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -3675,9 +3496,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) @@ -3694,9 +3513,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -3722,8 +3539,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero 
[off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3745,8 +3561,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3772,8 +3587,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v.reset(OpMIPS64MOVVload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3795,8 +3609,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v.reset(OpMIPS64MOVVload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3851,9 +3664,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v.reset(OpMIPS64MOVVstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -3876,9 +3687,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v.reset(OpMIPS64MOVVstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -3894,8 +3703,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3921,8 +3729,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool { v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3944,8 +3751,7 @@ func 
rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool { v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -3971,8 +3777,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { v.reset(OpMIPS64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -3994,8 +3799,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { v.reset(OpMIPS64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4105,8 +3909,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v.reset(OpMIPS64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -4128,8 +3931,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v.reset(OpMIPS64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4276,9 +4078,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) @@ -4301,9 +4101,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) @@ -4319,8 +4117,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off 
v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -4337,9 +4134,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) @@ -4356,9 +4151,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v.reset(OpMIPS64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -4384,8 +4177,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool { v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) @@ -4407,8 +4199,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool { v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5239,10 +5030,9 @@ func rewriteValueMIPS64_OpMod16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5261,10 +5051,9 @@ func rewriteValueMIPS64_OpMod16u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5283,10 +5072,9 @@ func rewriteValueMIPS64_OpMod32(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5305,10 +5093,9 @@ func rewriteValueMIPS64_OpMod32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5325,8 +5112,7 @@ func rewriteValueMIPS64_OpMod64(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5343,8 +5129,7 @@ func rewriteValueMIPS64_OpMod64u(v *Value) bool { y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5363,10 +5148,9 @@ func rewriteValueMIPS64_OpMod8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5385,10 +5169,9 @@ func rewriteValueMIPS64_OpMod8u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -5422,12 +5205,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpMIPS64MOVBstore) 
- v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] {t} dst src mem) @@ -5445,12 +5225,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } v.reset(OpMIPS64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -5464,20 +5241,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 1 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] {t} dst src mem) @@ -5495,12 +5266,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } v.reset(OpMIPS64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] {t} dst src mem) @@ -5519,20 +5287,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - 
v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [4] dst src mem) @@ -5546,38 +5308,26 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5595,12 +5345,9 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } v.reset(OpMIPS64MOVVstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] {t} dst src mem) @@ -5619,20 +5366,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := 
b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [8] {t} dst src mem) @@ -5651,38 +5392,26 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v3.AuxInt = 2 - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v4.AuxInt = 2 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) + v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v5.AddArg(dst) v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -5696,29 +5425,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v3.AddArg(dst) v4 := 
b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] {t} dst src mem) @@ -5737,29 +5457,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [12] {t} dst src mem) @@ -5778,29 +5489,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + 
v.AddArg3(dst, v0, v1) return true } // match: (Move [16] {t} dst src mem) @@ -5819,20 +5521,14 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [24] {t} dst src mem) @@ -5851,29 +5547,20 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 16 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] {t} dst src mem) @@ -5890,9 +5577,7 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64DUFFCOPY) v.AuxInt = 16 * (128 - s/8) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] {t} dst src mem) @@ -5909,13 +5594,10 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { } v.reset(OpMIPS64LoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) 
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -5932,8 +5614,7 @@ func rewriteValueMIPS64_OpMul16(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5950,8 +5631,7 @@ func rewriteValueMIPS64_OpMul32(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5968,8 +5648,7 @@ func rewriteValueMIPS64_OpMul64(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -5986,8 +5665,7 @@ func rewriteValueMIPS64_OpMul8(v *Value) bool { y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6006,14 +5684,12 @@ func rewriteValueMIPS64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -6031,14 +5707,12 @@ func rewriteValueMIPS64_OpNeq32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + 
v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -6053,8 +5727,7 @@ func rewriteValueMIPS64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6071,12 +5744,10 @@ func rewriteValueMIPS64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -6091,8 +5762,7 @@ func rewriteValueMIPS64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpMIPS64FPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -6111,14 +5781,12 @@ func rewriteValueMIPS64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -6134,12 +5802,10 @@ func rewriteValueMIPS64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -6198,9 +5864,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { } v.reset(OpMIPS64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y 
mem) @@ -6216,9 +5880,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { } v.reset(OpMIPS64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -6234,9 +5896,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { } v.reset(OpMIPS64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -6257,17 +5917,14 @@ func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6288,17 +5945,14 @@ func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool { c := v_1.AuxInt v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 31 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 31 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6319,17 +5973,14 @@ func rewriteValueMIPS64_OpRotateLeft64(v *Value) bool { c := v_1.AuxInt v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 63 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 63 - v2.AddArg(v3) - v.AddArg(v2) + 
v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6350,17 +6001,14 @@ func rewriteValueMIPS64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -6381,20 +6029,17 @@ func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6414,20 +6059,17 @@ func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6447,16 +6089,13 @@ func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, 
OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) return true } } @@ -6476,20 +6115,17 @@ func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6507,22 +6143,19 @@ func rewriteValueMIPS64_OpRsh16x16(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6540,22 +6173,19 @@ func rewriteValueMIPS64_OpRsh16x32(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6573,18 +6203,15 @@ func rewriteValueMIPS64_OpRsh16x64(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v3.AddArg(v4) + v3.AddArg2(y, v4) v2.AddArg(v3) - v1.AddArg(v2) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) return true } } @@ -6602,22 +6229,19 @@ func rewriteValueMIPS64_OpRsh16x8(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6637,20 +6261,17 @@ func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6670,20 +6291,17 @@ func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6703,16 +6321,13 @@ func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) return true } } @@ -6732,20 +6347,17 @@ func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -6763,22 +6375,19 @@ func rewriteValueMIPS64_OpRsh32x16(v 
*Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6796,22 +6405,19 @@ func rewriteValueMIPS64_OpRsh32x32(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6829,18 +6435,15 @@ func rewriteValueMIPS64_OpRsh32x64(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v3.AddArg(v4) + v3.AddArg2(y, v4) v2.AddArg(v3) - v1.AddArg(v2) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) return true } } @@ -6858,22 +6461,19 @@ func rewriteValueMIPS64_OpRsh32x8(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, 
typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -6893,18 +6493,15 @@ func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -6924,18 +6521,15 @@ func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -6955,14 +6549,11 @@ func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v3.AddArg(x) - 
v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) return true } } @@ -6982,18 +6573,15 @@ func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } } @@ -7009,22 +6597,19 @@ func rewriteValueMIPS64_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v0.AddArg(v1) v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v5.AddArg(y) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) + v.AddArg2(x, v0) return true } } @@ -7040,22 +6625,19 @@ func rewriteValueMIPS64_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v0.AddArg(v1) v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v5.AddArg(y) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) + v.AddArg2(x, v0) return true } } @@ -7071,18 +6653,15 @@ func rewriteValueMIPS64_OpRsh64x64(v *Value) bool { x := v_0 y := 
v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 63 - v2.AddArg(v3) + v2.AddArg2(y, v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) + v.AddArg2(x, v0) return true } } @@ -7098,22 +6677,19 @@ func rewriteValueMIPS64_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAV) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v0.AddArg(v1) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(y) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) + v.AddArg2(x, v0) return true } } @@ -7133,20 +6709,17 @@ func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -7166,20 +6739,17 @@ func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - 
v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -7199,16 +6769,13 @@ func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) - v1.AddArg(y) + v1.AddArg2(v2, y) v0.AddArg(v1) - v.AddArg(v0) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) - v3.AddArg(y) - v.AddArg(v3) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) return true } } @@ -7228,20 +6795,17 @@ func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) - v1.AddArg(v3) + v1.AddArg2(v2, v3) v0.AddArg(v1) - v.AddArg(v0) v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v4.AddArg(v6) - v.AddArg(v4) + v4.AddArg2(v5, v6) + v.AddArg2(v0, v4) return true } } @@ -7259,22 +6823,19 @@ func rewriteValueMIPS64_OpRsh8x16(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v6.AddArg(y) - 
v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -7292,22 +6853,19 @@ func rewriteValueMIPS64_OpRsh8x32(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ -7325,18 +6883,15 @@ func rewriteValueMIPS64_OpRsh8x64(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 - v3.AddArg(v4) + v3.AddArg2(y, v4) v2.AddArg(v3) - v1.AddArg(v2) - v1.AddArg(y) - v.AddArg(v1) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) return true } } @@ -7354,22 +6909,19 @@ func rewriteValueMIPS64_OpRsh8x8(v *Value) bool { v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v1.AddArg(v2) v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v6.AddArg(y) - v1.AddArg(v6) - v.AddArg(v1) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) return true } } @@ 
-7683,9 +7235,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7700,9 +7250,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7717,9 +7265,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7734,9 +7280,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVVstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7751,9 +7295,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -7768,9 +7310,7 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { break } v.reset(OpMIPS64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -7802,11 +7342,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpMIPS64MOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) @@ -7823,11 +7361,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } v.reset(OpMIPS64MOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -7840,18 +7376,14 @@ func 
rewriteValueMIPS64_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 1 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) @@ -7868,11 +7400,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } v.reset(OpMIPS64MOVWstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) @@ -7890,18 +7420,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) @@ -7914,32 +7440,24 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 3 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v3.AuxInt = 1 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := 
b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) @@ -7956,11 +7474,9 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } v.reset(OpMIPS64MOVVstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] {t} ptr mem) @@ -7978,18 +7494,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) @@ -8007,32 +7519,24 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 6 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v3.AuxInt = 2 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v5.AuxInt = 0 - v5.AddArg(ptr) v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(ptr, v6, mem) + v3.AddArg3(ptr, v4, v5) + v1.AddArg3(ptr, v2, v3) 
+ v.AddArg3(ptr, v0, v1) return true } // match: (Zero [3] ptr mem) @@ -8045,25 +7549,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 2 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v1.AuxInt = 1 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] {t} ptr mem) @@ -8081,25 +7579,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVHstore) v.AuxInt = 4 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v1.AuxInt = 2 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] {t} ptr mem) @@ -8117,25 +7609,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVWstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, 
types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] {t} ptr mem) @@ -8153,18 +7639,14 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 8 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [24] {t} ptr mem) @@ -8182,25 +7664,19 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64MOVVstore) v.AuxInt = 16 - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v3.AuxInt = 0 - v3.AddArg(ptr) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(ptr, v4, mem) + v1.AddArg3(ptr, v2, v3) + v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) @@ -8216,8 +7692,7 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64DUFFZERO) v.AuxInt = 8 * (128 - s/8) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Zero [s] {t} ptr mem) @@ -8233,12 +7708,10 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { } v.reset(OpMIPS64LoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, 
ptr.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 53549dda74..c7b4f44920 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -810,10 +810,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool { mem := v_3 v.reset(OpPPC64LoweredAtomicCas32) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } } @@ -831,10 +828,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool { mem := v_3 v.reset(OpPPC64LoweredAtomicCas64) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } } @@ -852,10 +846,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v *Value) bool { mem := v_3 v.reset(OpPPC64LoweredAtomicCas32) v.AuxInt = 0 - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) + v.AddArg4(ptr, old, new_, mem) return true } } @@ -869,8 +860,7 @@ func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad32) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -884,8 +874,7 @@ func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad64) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -899,8 +888,7 @@ func rewriteValuePPC64_OpAtomicLoad8(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad8) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -914,8 +902,7 @@ func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoad32) v.AuxInt = 0 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) 
return true } } @@ -929,8 +916,7 @@ func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredAtomicLoadPtr) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -946,9 +932,7 @@ func rewriteValuePPC64_OpAtomicStore32(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore32) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -964,9 +948,7 @@ func rewriteValuePPC64_OpAtomicStore64(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore64) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -982,9 +964,7 @@ func rewriteValuePPC64_OpAtomicStore8(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore8) v.AuxInt = 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -1000,9 +980,7 @@ func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool { mem := v_2 v.reset(OpPPC64LoweredAtomicStore32) v.AuxInt = 0 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } } @@ -1020,11 +998,9 @@ func rewriteValuePPC64_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpPPC64SUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1039,10 +1015,9 @@ func rewriteValuePPC64_OpBitLen32(v *Value) bool { v.reset(OpPPC64SUB) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1057,10 +1032,9 @@ func rewriteValuePPC64_OpBitLen64(v *Value) bool { v.reset(OpPPC64SUB) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) 
return true } } @@ -1071,8 +1045,7 @@ func rewriteValuePPC64_OpCom16(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1083,8 +1056,7 @@ func rewriteValuePPC64_OpCom32(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1095,8 +1067,7 @@ func rewriteValuePPC64_OpCom64(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1107,8 +1078,7 @@ func rewriteValuePPC64_OpCom8(v *Value) bool { for { x := v_0 v.reset(OpPPC64NOR) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } } @@ -1129,9 +1099,7 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = 2 - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } // match: (CondSelect x y bool) @@ -1146,12 +1114,10 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = 2 - v.AddArg(x) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(bool) - v.AddArg(v0) + v.AddArg3(x, y, v0) return true } return false @@ -1174,8 +1140,7 @@ func rewriteValuePPC64_OpCopysign(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64FCPSGN) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -1193,8 +1158,7 @@ func rewriteValuePPC64_OpCtz16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16) v2.AuxInt = -1 v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) + v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1218,8 +1182,7 @@ func rewriteValuePPC64_OpCtz32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int) v2.AuxInt = -1 v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) + v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1252,8 +1215,7 @@ func rewriteValuePPC64_OpCtz64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64) v1.AuxInt = -1 
v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -1280,8 +1242,7 @@ func rewriteValuePPC64_OpCtz8(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8) v2.AuxInt = -1 v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) + v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1424,10 +1385,9 @@ func rewriteValuePPC64_OpDiv16(v *Value) bool { v.reset(OpPPC64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1444,10 +1404,9 @@ func rewriteValuePPC64_OpDiv16u(v *Value) bool { v.reset(OpPPC64DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1464,10 +1423,9 @@ func rewriteValuePPC64_OpDiv8(v *Value) bool { v.reset(OpPPC64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1484,10 +1442,9 @@ func rewriteValuePPC64_OpDiv8u(v *Value) bool { v.reset(OpPPC64DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1510,10 +1467,9 @@ func rewriteValuePPC64_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1528,10 +1484,9 @@ func rewriteValuePPC64_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, 
typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1547,8 +1502,7 @@ func rewriteValuePPC64_OpEq32(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1564,8 +1518,7 @@ func rewriteValuePPC64_OpEq32F(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1581,8 +1534,7 @@ func rewriteValuePPC64_OpEq64(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1598,8 +1550,7 @@ func rewriteValuePPC64_OpEq64F(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1623,10 +1574,9 @@ func rewriteValuePPC64_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1641,10 +1591,9 @@ func rewriteValuePPC64_OpEq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1662,8 +1611,7 @@ func rewriteValuePPC64_OpEqB(v *Value) bool { v.reset(OpPPC64ANDconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } 
@@ -1679,8 +1627,7 @@ func rewriteValuePPC64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1696,8 +1643,7 @@ func rewriteValuePPC64_OpGeq32F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1713,8 +1659,7 @@ func rewriteValuePPC64_OpGeq64F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1730,8 +1675,7 @@ func rewriteValuePPC64_OpGreater32F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1747,8 +1691,7 @@ func rewriteValuePPC64_OpGreater64F(v *Value) bool { y := v_1 v.reset(OpPPC64FGreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1764,8 +1707,7 @@ func rewriteValuePPC64_OpIsInBounds(v *Value) bool { len := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -1796,8 +1738,7 @@ func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) + v0.AddArg2(idx, len) v.AddArg(v0) return true } @@ -1816,10 +1757,9 @@ func rewriteValuePPC64_OpLeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - 
v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1838,10 +1778,9 @@ func rewriteValuePPC64_OpLeq16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1857,8 +1796,7 @@ func rewriteValuePPC64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1874,8 +1812,7 @@ func rewriteValuePPC64_OpLeq32F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1891,8 +1828,7 @@ func rewriteValuePPC64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1908,8 +1844,7 @@ func rewriteValuePPC64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1925,8 +1860,7 @@ func rewriteValuePPC64_OpLeq64F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1942,8 +1876,7 @@ func rewriteValuePPC64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -1962,10 +1895,9 @@ func rewriteValuePPC64_OpLeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, 
typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1984,10 +1916,9 @@ func rewriteValuePPC64_OpLeq8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2006,10 +1937,9 @@ func rewriteValuePPC64_OpLess16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2028,10 +1958,9 @@ func rewriteValuePPC64_OpLess16U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2047,8 +1976,7 @@ func rewriteValuePPC64_OpLess32(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2064,8 +1992,7 @@ func rewriteValuePPC64_OpLess32F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2081,8 +2008,7 @@ func rewriteValuePPC64_OpLess32U(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2098,8 +2024,7 @@ func rewriteValuePPC64_OpLess64(v *Value) bool { y := v_1 
v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2115,8 +2040,7 @@ func rewriteValuePPC64_OpLess64F(v *Value) bool { y := v_1 v.reset(OpPPC64FLessThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2132,8 +2056,7 @@ func rewriteValuePPC64_OpLess64U(v *Value) bool { y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2152,10 +2075,9 @@ func rewriteValuePPC64_OpLess8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2174,10 +2096,9 @@ func rewriteValuePPC64_OpLess8U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2198,8 +2119,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2213,8 +2133,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2228,8 +2147,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVWZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2243,8 +2161,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVHload) - 
v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2258,8 +2175,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVHZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2273,8 +2189,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVBZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2289,8 +2204,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { } v.reset(OpPPC64MOVBreg) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -2305,8 +2219,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64MOVBZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2320,8 +2233,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64FMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2335,8 +2247,7 @@ func rewriteValuePPC64_OpLoad(v *Value) bool { break } v.reset(OpPPC64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2369,8 +2280,7 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x16 x y) @@ -2379,9 +2289,7 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 @@ -2389,8 +2297,8 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return 
true } } @@ -2443,8 +2351,7 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x32 x y) @@ -2453,9 +2360,7 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 @@ -2463,8 +2368,8 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2532,8 +2437,7 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x64 x y) @@ -2542,16 +2446,14 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2570,8 +2472,7 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x8 x y) @@ -2580,9 +2481,7 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -16 @@ -2590,8 +2489,8 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - 
v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2610,8 +2509,7 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x16 x y) @@ -2620,9 +2518,7 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -2630,8 +2526,8 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2684,8 +2580,7 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x32 x y) @@ -2694,9 +2589,7 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -2704,8 +2597,8 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2773,8 +2666,7 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x64 x (AND y (MOVDconst [31]))) @@ -2793,11 +2685,10 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { continue } v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -2811,11 +2702,10 @@ func 
rewriteValuePPC64_OpLsh32x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x64 x y) @@ -2824,16 +2714,14 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2852,8 +2740,7 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x8 x y) @@ -2862,9 +2749,7 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -2872,8 +2757,8 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2892,8 +2777,7 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x16 x y) @@ -2902,9 +2786,7 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -2912,8 +2794,8 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { 
v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -2966,8 +2848,7 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x32 x y) @@ -2976,9 +2857,7 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -2986,8 +2865,8 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3055,8 +2934,7 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x64 x (AND y (MOVDconst [63]))) @@ -3075,11 +2953,10 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { continue } v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -3093,11 +2970,10 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x64 x y) @@ -3106,16 +2982,14 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) 
+ v.AddArg2(x, v0) return true } } @@ -3134,8 +3008,7 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { break } v.reset(OpPPC64SLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x8 x y) @@ -3144,9 +3017,7 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -3154,8 +3025,8 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3174,8 +3045,7 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x16 x y) @@ -3184,9 +3054,7 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 @@ -3194,8 +3062,8 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3248,8 +3116,7 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x32 x y) @@ -3258,9 +3125,7 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 @@ -3268,8 +3133,8 @@ func 
rewriteValuePPC64_OpLsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3337,8 +3202,7 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x64 x y) @@ -3347,16 +3211,14 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3375,8 +3237,7 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { break } v.reset(OpPPC64SLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x8 x y) @@ -3385,9 +3246,7 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -8 @@ -3395,8 +3254,8 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3413,10 +3272,9 @@ func rewriteValuePPC64_OpMod16(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3433,10 +3291,9 @@ func rewriteValuePPC64_OpMod16u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3451,14 +3308,11 @@ func rewriteValuePPC64_OpMod32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3473,14 +3327,11 @@ func rewriteValuePPC64_OpMod32u(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3495,14 +3346,11 @@ func rewriteValuePPC64_OpMod64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3517,14 +3365,11 @@ func rewriteValuePPC64_OpMod64u(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SUB) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3541,10 +3386,9 @@ func rewriteValuePPC64_OpMod8(v *Value) bool { v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3561,10 +3405,9 @@ func rewriteValuePPC64_OpMod8u(v *Value) bool { v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, 
OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3596,12 +3439,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpPPC64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -3614,12 +3454,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpPPC64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -3632,12 +3469,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpPPC64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] {t} dst src mem) @@ -3655,12 +3489,9 @@ func rewriteValuePPC64_OpMove(v *Value) bool { break } v.reset(OpPPC64MOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -3674,20 +3505,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVWstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg(src) - 
v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -3701,20 +3526,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -3728,20 +3547,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -3755,20 +3568,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move 
[7] dst src mem) @@ -3782,29 +3589,20 @@ func rewriteValuePPC64_OpMove(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -3820,9 +3618,7 @@ func rewriteValuePPC64_OpMove(v *Value) bool { } v.reset(OpPPC64LoweredMove) v.AuxInt = s - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } return false @@ -3846,10 +3642,9 @@ func rewriteValuePPC64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3864,10 +3659,9 @@ func rewriteValuePPC64_OpNeq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3883,8 +3677,7 @@ func rewriteValuePPC64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, 
y) v.AddArg(v0) return true } @@ -3900,8 +3693,7 @@ func rewriteValuePPC64_OpNeq32F(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3917,8 +3709,7 @@ func rewriteValuePPC64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3934,8 +3725,7 @@ func rewriteValuePPC64_OpNeq64F(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -3959,10 +3749,9 @@ func rewriteValuePPC64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3977,10 +3766,9 @@ func rewriteValuePPC64_OpNeq8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -3996,8 +3784,7 @@ func rewriteValuePPC64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -4026,8 +3813,7 @@ func rewriteValuePPC64_OpOffPtr(v *Value) bool { v.reset(OpPPC64ADD) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -4119,8 +3905,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v.reset(OpPPC64ROTL) - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4160,8 +3945,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4254,8 +4038,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { continue } v.reset(OpPPC64ANDN) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -4585,8 +4368,7 @@ func rewriteValuePPC64_OpPPC64CMP(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4643,8 +4425,7 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4712,8 +4493,7 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64CMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW (MOVWreg x) y) @@ -4725,8 +4505,7 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpPPC64CMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW x (MOVDconst [c])) @@ -4776,8 +4555,7 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -4796,8 +4574,7 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64CMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU (MOVWZreg x) y) @@ -4809,8 +4586,7 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpPPC64CMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU x (MOVDconst [c])) @@ -4860,8 +4636,7 @@ func 
rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -5067,8 +4842,7 @@ func rewriteValuePPC64_OpPPC64Equal(v *Value) bool { v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5101,9 +4875,7 @@ func rewriteValuePPC64_OpPPC64FADD(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -5124,9 +4896,7 @@ func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -5175,15 +4945,12 @@ func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value) bool { v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32) v1.AuxInt = 1 v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = 1 - v1.AddArg(v2) - v1.AddArg(cmp) - v.AddArg(v1) - v.AddArg(cmp) + v1.AddArg2(v2, cmp) + v.AddArg3(v0, v1, cmp) return true } } @@ -5199,8 +4966,7 @@ func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value) bool { v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5216,15 +4982,12 @@ func rewriteValuePPC64_OpPPC64FLessEqual(v *Value) bool { v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32) v1.AuxInt = 0 v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = 1 - v1.AddArg(v2) - v1.AddArg(cmp) - v.AddArg(v1) - v.AddArg(cmp) + v1.AddArg2(v2, cmp) + v.AddArg3(v0, v1, cmp) return true } } @@ -5240,8 +5003,7 @@ func 
rewriteValuePPC64_OpPPC64FLessThan(v *Value) bool { v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5286,8 +5048,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { v.reset(OpPPC64FMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -5308,8 +5069,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { v.reset(OpPPC64FMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5332,9 +5092,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -5356,9 +5114,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { v.reset(OpPPC64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -5382,9 +5138,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { v.reset(OpPPC64FMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -5412,8 +5166,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool { v.reset(OpPPC64FMOVSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -5434,8 +5187,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool { v.reset(OpPPC64FMOVSload) v.AuxInt = off1 + off2 v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -5463,9 +5215,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool { v.reset(OpPPC64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -5489,9 +5239,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool { v.reset(OpPPC64FMOVSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -5550,9 +5298,7 @@ func rewriteValuePPC64_OpPPC64FSUB(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMSUB) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -5570,9 +5316,7 @@ func rewriteValuePPC64_OpPPC64FSUBS(v *Value) bool { x := v_0.Args[0] z := v_1 v.reset(OpPPC64FMSUBS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -5645,8 +5389,7 @@ func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool { v.AuxInt = 4 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5703,8 +5446,7 @@ func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool { v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -5998,9 +5740,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = n + 1 - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } // match: (ISEL [n] x y (InvertFlags bool)) @@ -6019,9 +5759,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = n - 1 - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } // match: (ISEL [n] x y 
(InvertFlags bool)) @@ -6040,9 +5778,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = n - v.AddArg(x) - v.AddArg(y) - v.AddArg(bool) + v.AddArg3(x, y, bool) return true } return false @@ -6248,8 +5984,7 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = n + 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(bool) + v.AddArg2(v0, bool) return true } // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) @@ -6268,8 +6003,7 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = n - 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(bool) + v.AddArg2(v0, bool) return true } // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) @@ -6288,8 +6022,7 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = n v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(bool) + v.AddArg2(v0, bool) return true } return false @@ -6347,8 +6080,7 @@ func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool { v.AuxInt = 5 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -6405,8 +6137,7 @@ func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool { v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -6446,8 +6177,7 @@ func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -6475,8 +6205,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { v.reset(OpPPC64MOVBZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) @@ -6497,8 +6226,7 @@ 
func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { v.reset(OpPPC64MOVBZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem) @@ -6520,9 +6248,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { break } v.reset(OpPPC64MOVBZloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -6546,8 +6272,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool { } v.reset(OpPPC64MOVBZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBZloadidx (MOVDconst [c]) ptr mem) @@ -6565,8 +6290,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool { } v.reset(OpPPC64MOVBZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -6945,9 +6669,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -6971,9 +6693,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -6989,8 +6709,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} p:(ADD ptr idx) val mem) @@ -7011,10 +6730,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { break } v.reset(OpPPC64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: 
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) @@ -7031,9 +6747,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) @@ -7050,9 +6764,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) @@ -7069,9 +6781,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem) @@ -7088,9 +6798,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) @@ -7107,9 +6815,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWZreg x) mem) @@ -7126,9 +6832,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem) @@ -7154,12 +6858,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: 
(MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem) @@ -7185,12 +6887,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWreg x) [c]) mem) @@ -7216,12 +6916,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWZreg x) [c]) mem) @@ -7247,12 +6945,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (MOVBstore [i1] {s} p (SRWconst w [24]) x0:(MOVBstore [i0] {s} p (SRWconst w [16]) mem)) @@ -7285,12 +6981,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16) v0.AuxInt = 16 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i1] {s} p (SRDconst w [24]) x0:(MOVBstore [i0] {s} p (SRDconst w [16]) mem)) @@ -7323,12 +7017,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16) v0.AuxInt = 16 v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } // match: (MOVBstore [i1] {s} p (SRWconst w [8]) x0:(MOVBstore [i0] {s} p w mem)) @@ -7357,9 +7049,7 
@@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i1] {s} p (SRDconst w [8]) x0:(MOVBstore [i0] {s} p w mem)) @@ -7388,9 +7078,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i3] {s} p w x0:(MOVBstore [i2] {s} p (SRWconst w [8]) x1:(MOVBstore [i1] {s} p (SRWconst w [16]) x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem)))) @@ -7454,9 +7142,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(v0, w, mem) return true } // match: (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem)) @@ -7488,9 +7174,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(v0, w, mem) return true } // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem))))) @@ -7567,9 +7251,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i7] {s} p w x0:(MOVBstore [i6] {s} p (SRDconst w [8]) x1:(MOVBstore [i5] {s} p (SRDconst w [16]) x2:(MOVBstore [i4] {s} p (SRDconst w [24]) x3:(MOVBstore [i3] {s} p (SRDconst w [32]) x4:(MOVBstore [i2] {s} p (SRDconst w [40]) x5:(MOVBstore [i1] {s} p (SRDconst w [48]) x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem)))))))) @@ -7697,9 +7379,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { v0.AuxInt = 
i0 v0.Aux = s v0.AddArg(p) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(v0, w, mem) return true } return false @@ -7727,9 +7407,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { } v.reset(OpPPC64MOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem) @@ -7748,9 +7426,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { } v.reset(OpPPC64MOVBstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBreg x) mem) @@ -7768,10 +7444,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBZreg x) mem) @@ -7789,10 +7462,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHreg x) mem) @@ -7810,10 +7480,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHZreg x) mem) @@ -7831,10 +7498,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWreg x) mem) @@ -7852,10 +7516,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) 
v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) @@ -7873,10 +7534,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHreg x) [c]) mem) @@ -7903,13 +7561,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHZreg x) [c]) mem) @@ -7936,13 +7591,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWreg x) [c]) mem) @@ -7969,13 +7621,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWZreg x) [c]) mem) @@ -8002,13 +7651,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) v0.AuxInt = c v0.AddArg(x) - 
v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(ptr, idx, v0, mem) return true } return false @@ -8034,8 +7680,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool { v.reset(OpPPC64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -8058,8 +7703,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool { v.reset(OpPPC64MOVBstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -8105,8 +7749,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { v.reset(OpPPC64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem) @@ -8127,8 +7770,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { v.reset(OpPPC64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem) @@ -8150,9 +7792,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { break } v.reset(OpPPC64MOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8176,8 +7816,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool { } v.reset(OpPPC64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx (MOVDconst [c]) ptr mem) @@ -8195,8 +7834,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool { } v.reset(OpPPC64MOVDload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8219,9 +7857,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64FMOVDstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, 
x, mem) return true } // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) @@ -8243,9 +7879,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -8269,9 +7903,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -8287,8 +7919,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { v.reset(OpPPC64MOVDstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstore [off] {sym} p:(ADD ptr idx) val mem) @@ -8309,10 +7940,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { break } v.reset(OpPPC64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -8338,9 +7966,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool { } v.reset(OpPPC64MOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem) @@ -8359,9 +7985,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool { } v.reset(OpPPC64MOVDstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -8387,8 +8011,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { v.reset(OpPPC64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -8411,8 +8034,7 @@ func 
rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { v.reset(OpPPC64MOVDstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -8433,9 +8055,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHBRstore {sym} ptr (MOVHZreg x) mem) @@ -8450,9 +8070,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHBRstore {sym} ptr (MOVWreg x) mem) @@ -8467,9 +8085,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHBRstore {sym} ptr (MOVWZreg x) mem) @@ -8484,9 +8100,7 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -8514,8 +8128,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { v.reset(OpPPC64MOVHZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) @@ -8536,8 +8149,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { v.reset(OpPPC64MOVHZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem) @@ -8559,9 +8171,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { break } v.reset(OpPPC64MOVHZloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return 
false @@ -8585,8 +8195,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool { } v.reset(OpPPC64MOVHZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHZloadidx (MOVDconst [c]) ptr mem) @@ -8604,8 +8213,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool { } v.reset(OpPPC64MOVHZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -8874,8 +8482,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { v.reset(OpPPC64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem) @@ -8896,8 +8503,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { v.reset(OpPPC64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem) @@ -8919,9 +8525,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { break } v.reset(OpPPC64MOVHloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8945,8 +8549,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool { } v.reset(OpPPC64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVDconst [c]) ptr mem) @@ -8964,8 +8567,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool { } v.reset(OpPPC64MOVHload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -9231,9 +8833,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -9257,9 +8857,7 @@ func 
rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -9275,8 +8873,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} p:(ADD ptr idx) val mem) @@ -9297,10 +8894,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { break } v.reset(OpPPC64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) @@ -9317,9 +8911,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) @@ -9336,9 +8928,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) @@ -9355,9 +8945,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem) @@ -9374,9 +8962,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [i1] {s} p (SRWconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) @@ -9405,9 +8991,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v 
*Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHstore [i1] {s} p (SRDconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) @@ -9436,9 +9020,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = i0 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } return false @@ -9464,9 +9046,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { } v.reset(OpPPC64MOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem) @@ -9485,9 +9065,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { } v.reset(OpPPC64MOVHstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHreg x) mem) @@ -9505,10 +9083,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHZreg x) mem) @@ -9526,10 +9101,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWreg x) mem) @@ -9547,10 +9119,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) @@ -9568,10 +9137,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { 
v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -9597,8 +9163,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool { v.reset(OpPPC64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -9621,8 +9186,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool { v.reset(OpPPC64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -9643,9 +9207,7 @@ func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVWBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem) @@ -9660,9 +9222,7 @@ func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool { mem := v_2 v.reset(OpPPC64MOVWBRstore) v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -9690,8 +9250,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { v.reset(OpPPC64MOVWZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) @@ -9712,8 +9271,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { v.reset(OpPPC64MOVWZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem) @@ -9735,9 +9293,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { break } v.reset(OpPPC64MOVWZloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -9761,8 +9317,7 @@ func 
rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool { } v.reset(OpPPC64MOVWZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWZloadidx (MOVDconst [c]) ptr mem) @@ -9780,8 +9335,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool { } v.reset(OpPPC64MOVWZload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10129,8 +9683,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { v.reset(OpPPC64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem) @@ -10151,8 +9704,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { v.reset(OpPPC64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem) @@ -10174,9 +9726,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { break } v.reset(OpPPC64MOVWloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -10200,8 +9750,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool { } v.reset(OpPPC64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWloadidx (MOVDconst [c]) ptr mem) @@ -10219,8 +9768,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool { } v.reset(OpPPC64MOVWload) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -10533,9 +10081,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(x, val, mem) return true } // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) @@ -10559,9 +10105,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { 
v.reset(OpPPC64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) @@ -10577,8 +10121,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstorezero) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} p:(ADD ptr idx) val mem) @@ -10599,10 +10142,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { break } v.reset(OpPPC64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) @@ -10619,9 +10159,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) @@ -10638,9 +10176,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { v.reset(OpPPC64MOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } return false @@ -10666,9 +10202,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { } v.reset(OpPPC64MOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem) @@ -10687,9 +10221,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { } v.reset(OpPPC64MOVWstore) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWreg x) mem) @@ -10707,10 +10239,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { v.reset(OpPPC64MOVWstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) @@ -10728,10 +10257,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { v.reset(OpPPC64MOVWstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) + v.AddArg4(ptr, idx, x, mem) return true } return false @@ -10757,8 +10283,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool { v.reset(OpPPC64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) @@ -10781,8 +10306,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool { v.reset(OpPPC64MOVWstorezero) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(mem) + v.AddArg2(x, mem) return true } return false @@ -10823,8 +10347,7 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { v.AddArg(v0) v0.AuxInt = off v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) return true } return false @@ -10906,8 +10429,7 @@ func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool { v.AuxInt = 6 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(cmp) + v.AddArg2(v0, cmp) return true } } @@ -11000,8 +10522,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -11041,8 +10562,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -11121,8 +10641,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -11163,8 +10682,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = 
s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -11207,8 +10725,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -11251,8 +10768,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } break @@ -11303,8 +10819,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -11356,8 +10871,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -11423,8 +10937,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -11490,8 +11003,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -11560,8 +11072,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11630,8 +11141,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11700,8 +11210,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11770,8 +11279,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -11846,8 +11354,7 @@ func 
rewriteValuePPC64_OpPPC64OR(v *Value) bool { v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -11923,8 +11430,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v1.AddArg2(v2, mem) v0.AddArg(v1) return true } @@ -12039,8 +11545,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } } @@ -12159,8 +11664,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -12279,8 +11783,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -12399,8 +11902,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } } @@ -12613,8 +12115,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12654,8 +12155,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -12747,9 +12247,7 @@ func rewriteValuePPC64_OpPanicBounds(v *Value) bool { } v.reset(OpPPC64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -12765,9 +12263,7 @@ func rewriteValuePPC64_OpPanicBounds(v *Value) bool { } v.reset(OpPPC64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -12783,9 +12279,7 @@ func 
rewriteValuePPC64_OpPanicBounds(v *Value) bool { } v.reset(OpPPC64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -12851,17 +12345,14 @@ func rewriteValuePPC64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -12888,8 +12379,7 @@ func rewriteValuePPC64_OpRotateLeft32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -12915,8 +12405,7 @@ func rewriteValuePPC64_OpRotateLeft64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -12936,17 +12425,14 @@ func rewriteValuePPC64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -12968,8 +12454,7 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux16 x y) @@ -12980,9 +12465,7 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) 
v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -12990,8 +12473,8 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13050,8 +12533,7 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux32 x y) @@ -13062,9 +12544,7 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13072,8 +12552,8 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13147,8 +12627,7 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux64 x y) @@ -13159,16 +12638,14 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + 
v.AddArg2(v0, v1) return true } } @@ -13189,8 +12666,7 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux8 x y) @@ -13201,9 +12677,7 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13211,8 +12685,8 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13233,8 +12707,7 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x16 x y) @@ -13245,9 +12718,7 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13255,8 +12726,8 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13315,8 +12786,7 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x32 x y) @@ -13327,9 +12797,7 @@ func 
rewriteValuePPC64_OpRsh16x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13337,8 +12805,8 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13416,8 +12884,7 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x64 x y) @@ -13428,16 +12895,14 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13458,8 +12923,7 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x8 x y) @@ -13470,9 +12934,7 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -16 @@ -13480,8 +12942,8 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) 
bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -13500,8 +12962,7 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux16 x y) @@ -13510,9 +12971,7 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13520,8 +12979,8 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13574,8 +13033,7 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux32 x y) @@ -13584,9 +13042,7 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13594,8 +13050,8 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13663,8 +13119,7 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) @@ -13683,11 +13138,10 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { continue } v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 
v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -13701,11 +13155,10 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) @@ -13726,16 +13179,14 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) @@ -13763,16 +13214,14 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { continue } v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -13783,16 +13232,14 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13811,8 +13258,7 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { break } v.reset(OpPPC64SRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux8 x y) @@ -13821,9 +13267,7 @@ func 
rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13831,8 +13275,8 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13851,8 +13295,7 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x16 x y) @@ -13861,9 +13304,7 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13871,8 +13312,8 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -13925,8 +13366,7 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x32 x y) @@ -13935,9 +13375,7 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -13945,8 +13383,8 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14016,8 +13454,7 @@ func 
rewriteValuePPC64_OpRsh32x64(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x64 x (AND y (MOVDconst [31]))) @@ -14036,11 +13473,10 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { continue } v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -14054,11 +13490,10 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 31 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) @@ -14079,16 +13514,14 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) @@ -14116,16 +13549,14 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { continue } v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 31 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -14136,16 +13567,14 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, 
types.TypeFlags) v2.AuxInt = -32 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14164,8 +13593,7 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { break } v.reset(OpPPC64SRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x8 x y) @@ -14174,9 +13602,7 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -32 @@ -14184,8 +13610,8 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14204,8 +13630,7 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux16 x y) @@ -14214,9 +13639,7 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14224,8 +13647,8 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14278,8 +13701,7 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux32 x y) @@ -14288,9 +13710,7 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, 
OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14298,8 +13718,8 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14367,8 +13787,7 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) @@ -14387,11 +13806,10 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { continue } v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -14405,11 +13823,10 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) @@ -14430,16 +13847,14 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) @@ -14467,16 +13882,14 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { continue } v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, 
v2) + v.AddArg2(x, v0) return true } break @@ -14487,16 +13900,14 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14515,8 +13926,7 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { break } v.reset(OpPPC64SRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux8 x y) @@ -14525,9 +13935,7 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14535,8 +13943,8 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14555,8 +13963,7 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x16 x y) @@ -14565,9 +13972,7 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14575,8 +13980,8 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14629,8 +14034,7 @@ func rewriteValuePPC64_OpRsh64x32(v 
*Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x32 x y) @@ -14639,9 +14043,7 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14649,8 +14051,8 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14720,8 +14122,7 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x64 x (AND y (MOVDconst [63]))) @@ -14740,11 +14141,10 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { continue } v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -14758,11 +14158,10 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { } y := v_1.Args[0] v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v0.AuxInt = 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) @@ -14783,16 +14182,14 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { } y := v_1_1.Args[0] v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) @@ -14820,16 +14217,14 @@ func 
rewriteValuePPC64_OpRsh64x64(v *Value) bool { continue } v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = 64 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) v2.AuxInt = 63 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) return true } break @@ -14840,16 +14235,14 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14868,8 +14261,7 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { break } v.reset(OpPPC64SRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x8 x y) @@ -14878,9 +14270,7 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpPPC64SRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v2.AuxInt = -64 @@ -14888,8 +14278,8 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -14910,8 +14300,7 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux16 x y) @@ -14922,9 +14311,7 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -14932,8 +14319,8 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -14992,8 +14379,7 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux32 x y) @@ -15004,9 +14390,7 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15014,8 +14398,8 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15089,8 +14473,7 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux64 x y) @@ -15101,16 +14484,14 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15131,8 +14512,7 @@ 
func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux8 x y) @@ -15143,9 +14523,7 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15153,8 +14531,8 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15175,8 +14553,7 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x16 x y) @@ -15187,9 +14564,7 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15197,8 +14572,8 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15257,8 +14632,7 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x32 x y) @@ -15269,9 +14643,7 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, 
OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15279,8 +14651,8 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15358,8 +14730,7 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x64 x y) @@ -15370,16 +14741,14 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -15400,8 +14769,7 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x8 x y) @@ -15412,9 +14780,7 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64) v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags) v3.AuxInt = -8 @@ -15422,8 +14788,8 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + 
v.AddArg2(v0, v1) return true } } @@ -15459,9 +14825,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15476,9 +14840,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15493,9 +14855,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64FMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15510,9 +14870,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15527,9 +14885,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15544,9 +14900,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -15561,9 +14915,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { break } v.reset(OpPPC64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -15737,8 +15089,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpPPC64MOVBstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -15750,8 +15101,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpPPC64MOVHstorezero) - 
v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -15764,11 +15114,9 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 2 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem) - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [4] destptr mem) @@ -15780,8 +15128,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpPPC64MOVWstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [5] destptr mem) @@ -15794,11 +15141,9 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -15811,11 +15156,9 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVHstorezero) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -15828,15 +15171,12 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 6 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem) v0.AuxInt = 4 - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [8] {t} destptr mem) @@ -15853,8 +15193,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { 
break } v.reset(OpPPC64MOVDstorezero) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) @@ -15867,12 +15206,10 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64MOVWstorezero) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [12] {t} destptr mem) @@ -15890,12 +15227,10 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVWstorezero) v.AuxInt = 8 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [16] {t} destptr mem) @@ -15913,12 +15248,10 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 8 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [24] {t} destptr mem) @@ -15936,16 +15269,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 16 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 8 - v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [32] {t} destptr mem) @@ -15963,20 +15293,16 @@ func rewriteValuePPC64_OpZero(v *Value) bool { } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 24 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v0.AuxInt = 16 - 
v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] ptr mem) @@ -15987,8 +15313,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { mem := v_1 v.reset(OpPPC64LoweredZero) v.AuxInt = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } } @@ -16127,8 +15452,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16157,8 +15481,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16187,8 +15510,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16286,8 +15608,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16316,8 +15637,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16346,8 +15666,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16446,8 +15765,7 @@ func 
rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16476,8 +15794,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16506,8 +15823,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16707,8 +16023,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16737,8 +16052,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16767,8 +16081,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16867,8 +16180,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16897,8 +16209,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -16927,8 +16238,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true 
} @@ -17226,8 +16536,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -17256,8 +16565,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } @@ -17286,8 +16594,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) b.AddControl(v0) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 4d70814cfd..e40fe69930 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -590,19 +590,16 @@ func rewriteValueRISCV64_OpAvg64u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) v1.AuxInt = 1 v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) v2.AuxInt = 1 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t) v3.AuxInt = 1 v4 := b.NewValue0(v.Pos, OpRISCV64AND, t) - v4.AddArg(x) - v4.AddArg(y) + v4.AddArg2(x, y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg2(v0, v3) return true } } @@ -704,10 +701,9 @@ func rewriteValueRISCV64_OpDiv16(v *Value) bool { v.reset(OpRISCV64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -724,10 +720,9 @@ func rewriteValueRISCV64_OpDiv16u(v *Value) bool { v.reset(OpRISCV64DIVUW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) 
return true } } @@ -744,10 +739,9 @@ func rewriteValueRISCV64_OpDiv8(v *Value) bool { v.reset(OpRISCV64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -764,10 +758,9 @@ func rewriteValueRISCV64_OpDiv8u(v *Value) bool { v.reset(OpRISCV64DIVUW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -784,8 +777,7 @@ func rewriteValueRISCV64_OpEq16(v *Value) bool { v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -802,8 +794,7 @@ func rewriteValueRISCV64_OpEq32(v *Value) bool { y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -819,8 +810,7 @@ func rewriteValueRISCV64_OpEq64(v *Value) bool { y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -838,8 +828,7 @@ func rewriteValueRISCV64_OpEq8(v *Value) bool { v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -858,8 +847,7 @@ func rewriteValueRISCV64_OpEqB(v *Value) bool { v.reset(OpRISCV64XORI) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -875,8 +863,7 @@ func rewriteValueRISCV64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) 
- v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -890,8 +877,7 @@ func rewriteValueRISCV64_OpGeq32F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLES) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -904,8 +890,7 @@ func rewriteValueRISCV64_OpGeq64F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLED) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -918,8 +903,7 @@ func rewriteValueRISCV64_OpGreater32F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLTS) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -932,8 +916,7 @@ func rewriteValueRISCV64_OpGreater64F(v *Value) bool { x := v_0 y := v_1 v.reset(OpRISCV64FLTD) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } } @@ -952,10 +935,9 @@ func rewriteValueRISCV64_OpHmul32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -975,10 +957,9 @@ func rewriteValueRISCV64_OpHmul32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -993,8 +974,7 @@ func rewriteValueRISCV64_OpIsNonNil(v *Value) bool { p := v_0 v.reset(OpNeqPtr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v.AddArg(v0) - v.AddArg(p) + v.AddArg2(v0, p) return true } } @@ -1010,8 +990,7 @@ func rewriteValueRISCV64_OpLeq16(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1028,8 +1007,7 @@ func rewriteValueRISCV64_OpLeq16U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, 
OpLess16U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1046,8 +1024,7 @@ func rewriteValueRISCV64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1064,8 +1041,7 @@ func rewriteValueRISCV64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1082,8 +1058,7 @@ func rewriteValueRISCV64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1100,8 +1075,7 @@ func rewriteValueRISCV64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1118,8 +1092,7 @@ func rewriteValueRISCV64_OpLeq8(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1136,8 +1109,7 @@ func rewriteValueRISCV64_OpLeq8U(v *Value) bool { y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -1155,10 +1127,9 @@ func rewriteValueRISCV64_OpLess16(v *Value) bool { v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1175,10 +1146,9 @@ func rewriteValueRISCV64_OpLess16U(v *Value) bool { v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ 
-1195,10 +1165,9 @@ func rewriteValueRISCV64_OpLess32(v *Value) bool { v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1215,10 +1184,9 @@ func rewriteValueRISCV64_OpLess32U(v *Value) bool { v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1235,10 +1203,9 @@ func rewriteValueRISCV64_OpLess8(v *Value) bool { v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1255,10 +1222,9 @@ func rewriteValueRISCV64_OpLess8U(v *Value) bool { v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1276,8 +1242,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1291,8 +1256,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1306,8 +1270,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1321,8 +1284,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1336,8 +1298,7 @@ func 
rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1351,8 +1312,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1366,8 +1326,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1381,8 +1340,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1396,8 +1354,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64FMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1411,8 +1368,7 @@ func rewriteValueRISCV64_OpLoad(v *Value) bool { break } v.reset(OpRISCV64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1443,9 +1399,7 @@ func rewriteValueRISCV64_OpLsh16x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1453,7 +1407,7 @@ func rewriteValueRISCV64_OpLsh16x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1470,9 +1424,7 @@ func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1480,7 +1432,7 @@ func 
rewriteValueRISCV64_OpLsh16x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1496,15 +1448,13 @@ func rewriteValueRISCV64_OpLsh16x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1521,9 +1471,7 @@ func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg16, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1531,7 +1479,7 @@ func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1548,9 +1496,7 @@ func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1558,7 +1504,7 @@ func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1575,9 +1521,7 @@ func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1585,7 +1529,7 @@ func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1601,15 +1545,13 @@ func 
rewriteValueRISCV64_OpLsh32x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1626,9 +1568,7 @@ func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg32, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1636,7 +1576,7 @@ func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1653,9 +1593,7 @@ func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1663,7 +1601,7 @@ func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1680,9 +1618,7 @@ func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1690,7 +1626,7 @@ func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1706,15 +1642,13 @@ func rewriteValueRISCV64_OpLsh64x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 
:= b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1731,9 +1665,7 @@ func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1741,7 +1673,7 @@ func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1758,9 +1690,7 @@ func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1768,7 +1698,7 @@ func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1785,9 +1715,7 @@ func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1795,7 +1723,7 @@ func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1811,15 +1739,13 @@ func rewriteValueRISCV64_OpLsh8x64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1836,9 +1762,7 @@ 
func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg8, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -1846,7 +1770,7 @@ func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1863,10 +1787,9 @@ func rewriteValueRISCV64_OpMod16(v *Value) bool { v.reset(OpRISCV64REMW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1883,10 +1806,9 @@ func rewriteValueRISCV64_OpMod16u(v *Value) bool { v.reset(OpRISCV64REMUW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1903,10 +1825,9 @@ func rewriteValueRISCV64_OpMod8(v *Value) bool { v.reset(OpRISCV64REMW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1923,10 +1844,9 @@ func rewriteValueRISCV64_OpMod8u(v *Value) bool { v.reset(OpRISCV64REMUW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1959,12 +1879,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -1977,12 +1894,9 
@@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -1995,12 +1909,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -2013,12 +1924,9 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpRISCV64MOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [s] {t} dst src mem) @@ -2031,13 +1939,10 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { mem := v_2 v.reset(OpRISCV64LoweredMove) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type) v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg4(dst, src, v0, mem) return true } } @@ -2054,10 +1959,9 @@ func rewriteValueRISCV64_OpMul16(v *Value) bool { v.reset(OpRISCV64MULW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2074,10 +1978,9 @@ func rewriteValueRISCV64_OpMul8(v *Value) bool { v.reset(OpRISCV64MULW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) - v.AddArg(v1) + 
v.AddArg2(v0, v1) return true } } @@ -2091,8 +1994,7 @@ func rewriteValueRISCV64_OpNeg16(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2106,8 +2008,7 @@ func rewriteValueRISCV64_OpNeg32(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2121,8 +2022,7 @@ func rewriteValueRISCV64_OpNeg64(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2136,8 +2036,7 @@ func rewriteValueRISCV64_OpNeg8(v *Value) bool { x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2154,8 +2053,7 @@ func rewriteValueRISCV64_OpNeq16(v *Value) bool { v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -2172,8 +2070,7 @@ func rewriteValueRISCV64_OpNeq32(v *Value) bool { y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2189,8 +2086,7 @@ func rewriteValueRISCV64_OpNeq64(v *Value) bool { y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2208,8 +2104,7 @@ func rewriteValueRISCV64_OpNeq8(v *Value) bool { v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true @@ -2226,8 +2121,7 @@ func 
rewriteValueRISCV64_OpNeqPtr(v *Value) bool { y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2283,8 +2177,7 @@ func rewriteValueRISCV64_OpOffPtr(v *Value) bool { v.reset(OpRISCV64ADD) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -2305,9 +2198,7 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { } v.reset(OpRISCV64LoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -2323,9 +2214,7 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { } v.reset(OpRISCV64LoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -2341,9 +2230,7 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { } v.reset(OpRISCV64LoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -2431,8 +2318,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool { v.reset(OpRISCV64MOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem) @@ -2453,8 +2339,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool { v.reset(OpRISCV64MOVBUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2481,8 +2366,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool { v.reset(OpRISCV64MOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem) @@ -2503,8 +2387,7 
@@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool { v.reset(OpRISCV64MOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2533,9 +2416,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { v.reset(OpRISCV64MOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem) @@ -2557,9 +2438,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { v.reset(OpRISCV64MOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2582,10 +2461,9 @@ func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool { v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = c>>32 + 1 v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v2.AuxInt = int64(int32(c)) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (MOVDconst [c]) @@ -2603,10 +2481,9 @@ func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool { v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = c>>32 + 0 v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v2.AuxInt = int64(int32(c)) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } return false @@ -2633,8 +2510,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { v.reset(OpRISCV64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem) @@ -2655,8 +2531,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { v.reset(OpRISCV64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2685,9 +2560,7 
@@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool { v.reset(OpRISCV64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) @@ -2709,9 +2582,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool { v.reset(OpRISCV64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2738,8 +2609,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { v.reset(OpRISCV64MOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem) @@ -2760,8 +2630,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { v.reset(OpRISCV64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2788,8 +2657,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool { v.reset(OpRISCV64MOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem) @@ -2810,8 +2678,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool { v.reset(OpRISCV64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2840,9 +2707,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { v.reset(OpRISCV64MOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem) @@ -2864,9 +2729,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { 
v.reset(OpRISCV64MOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -2893,8 +2756,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { v.reset(OpRISCV64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem) @@ -2915,8 +2777,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { v.reset(OpRISCV64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2943,8 +2804,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool { v.reset(OpRISCV64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem) @@ -2965,8 +2825,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool { v.reset(OpRISCV64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } return false @@ -2995,9 +2854,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { v.reset(OpRISCV64MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem) @@ -3019,9 +2876,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { v.reset(OpRISCV64MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } return false @@ -3042,17 +2897,14 @@ func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) 
v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3073,17 +2925,14 @@ func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool { c := v_1.AuxInt v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) v1.AuxInt = c & 31 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) v3.AuxInt = -c & 31 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3104,17 +2953,14 @@ func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool { c := v_1.AuxInt v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = c & 63 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v3.AuxInt = -c & 63 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3135,17 +2981,14 @@ func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -3165,9 +3008,7 @@ func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := 
b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3175,7 +3016,7 @@ func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3194,9 +3035,7 @@ func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3204,7 +3043,7 @@ func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3223,15 +3062,13 @@ func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3250,9 +3087,7 @@ func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg16, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3260,7 +3095,7 @@ func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3279,9 +3114,7 @@ func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - 
v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3290,8 +3123,8 @@ func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3310,9 +3143,7 @@ func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3321,8 +3152,8 @@ func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3341,17 +3172,15 @@ func rewriteValueRISCV64_OpRsh16x64(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3370,9 +3199,7 @@ func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3381,8 +3208,8 @@ func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3401,9 +3228,7 @@ func 
rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3411,7 +3236,7 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3430,9 +3255,7 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3440,7 +3263,7 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3459,15 +3282,13 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3486,9 +3307,7 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg32, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3496,7 +3315,7 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3515,9 +3334,7 @@ func 
rewriteValueRISCV64_OpRsh32x16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3526,8 +3343,8 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3546,9 +3363,7 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3557,8 +3372,8 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3577,17 +3392,15 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3606,9 +3419,7 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3617,8 +3428,8 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) 
- v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -3635,9 +3446,7 @@ func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -3645,7 +3454,7 @@ func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3662,9 +3471,7 @@ func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -3672,7 +3479,7 @@ func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3688,15 +3495,13 @@ func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3713,9 +3518,7 @@ func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpNeg64, t) v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v2.AuxInt = 64 @@ -3723,7 +3526,7 @@ func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3740,9 +3543,7 @@ func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) 
v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3751,8 +3552,8 @@ func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3769,9 +3570,7 @@ func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3780,8 +3579,8 @@ func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3797,17 +3596,15 @@ func rewriteValueRISCV64_OpRsh64x64(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v2.AuxInt = 64 v2.AddArg(y) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3824,9 +3621,7 @@ func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { y := v_1 v.reset(OpRISCV64SRA) v.Type = t - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v1.AuxInt = -1 v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3835,8 +3630,8 @@ func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { v3.AddArg(y) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } } @@ -3855,9 +3650,7 @@ func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, 
OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3865,7 +3658,7 @@ func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3884,9 +3677,7 @@ func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3894,7 +3685,7 @@ func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3913,15 +3704,13 @@ func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3940,9 +3729,7 @@ func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpNeg8, t) v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) v3.AuxInt = 64 @@ -3950,7 +3737,7 @@ func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3969,9 +3756,7 @@ func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -3980,8 +3765,8 @@ func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4000,9 +3785,7 @@ func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -4011,8 +3794,8 @@ func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4031,17 +3814,15 @@ func rewriteValueRISCV64_OpRsh8x64(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) v3.AuxInt = 64 v3.AddArg(y) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4060,9 +3841,7 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) v2.AuxInt = -1 v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) @@ -4071,8 +3850,8 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { v4.AddArg(y) v3.AddArg(v4) v2.AddArg(v3) - v1.AddArg(v2) - v.AddArg(v1) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) return true } } @@ -4209,9 +3988,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } 
v.reset(OpRISCV64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4226,9 +4003,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4243,9 +4018,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4260,9 +4033,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4277,9 +4048,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64FMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -4294,9 +4063,7 @@ func rewriteValueRISCV64_OpStore(v *Value) bool { break } v.reset(OpRISCV64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4328,10 +4095,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVBstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) @@ -4343,10 +4108,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVHstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] ptr mem) @@ -4358,10 +4121,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVWstore) - 
v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] ptr mem) @@ -4373,10 +4134,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVDstore) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(ptr, v0, mem) return true } // match: (Zero [s] {t} ptr mem) @@ -4388,14 +4147,11 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { mem := v_1 v.reset(OpRISCV64LoweredZero) v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type) - v0.AddArg(ptr) v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v1.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) - v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(ptr, v1) + v.AddArg3(ptr, v0, mem) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 4fc90128d0..7a78dfdac7 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -936,12 +936,9 @@ func rewriteValueS390X_OpAtomicAdd32(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XAddTupleFirst32) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(ptr, val, mem) + v.AddArg2(val, v0) return true } } @@ -958,12 +955,9 @@ func rewriteValueS390X_OpAtomicAdd64(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XAddTupleFirst64) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(ptr, val, mem) + v.AddArg2(val, v0) return true } } @@ -980,21 +974,17 @@ func rewriteValueS390X_OpAtomicAnd8(v *Value) bool { val := v_1 mem := v_2 
v.reset(OpS390XLANfloor) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32) v1 := b.NewValue0(v.Pos, OpS390XORWconst, typ.UInt32) v1.AuxInt = -1 << 8 v1.AddArg(val) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32) v2.Aux = s390x.NewRotateParams(59, 60, 3) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 3 << 3 - v2.AddArg(v3) - v2.AddArg(ptr) - v0.AddArg(v2) - v.AddArg(v0) - v.AddArg(mem) + v2.AddArg2(v3, ptr) + v0.AddArg2(v1, v2) + v.AddArg3(ptr, v0, mem) return true } } @@ -1011,20 +1001,16 @@ func rewriteValueS390X_OpAtomicOr8(v *Value) bool { val := v_1 mem := v_2 v.reset(OpS390XLAOfloor) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt32) v1.AddArg(val) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32) v2.Aux = s390x.NewRotateParams(59, 60, 3) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 3 << 3 - v2.AddArg(v3) - v2.AddArg(ptr) - v0.AddArg(v2) - v.AddArg(v0) - v.AddArg(mem) + v2.AddArg2(v3, ptr) + v0.AddArg2(v1, v2) + v.AddArg3(ptr, v0, mem) return true } } @@ -1041,9 +1027,7 @@ func rewriteValueS390X_OpAtomicStore32(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1061,9 +1045,7 @@ func rewriteValueS390X_OpAtomicStore64(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1081,9 +1063,7 @@ func rewriteValueS390X_OpAtomicStore8(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1101,9 +1081,7 @@ func 
rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool { mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + v0.AddArg3(ptr, val, mem) v.AddArg(v0) return true } @@ -1122,11 +1100,9 @@ func rewriteValueS390X_OpAvg64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t) v0.AuxInt = 1 v1 := b.NewValue0(v.Pos, OpS390XSUB, t) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1141,10 +1117,9 @@ func rewriteValueS390X_OpBitLen64(v *Value) bool { v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1181,20 +1156,18 @@ func rewriteValueS390X_OpCtz32(v *Value) bool { v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XANDW, t) v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t) v4.AuxInt = 1 v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpS390XNOTW, t) v5.AddArg(x) - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1210,18 +1183,16 @@ func rewriteValueS390X_OpCtz64(v *Value) bool { v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpS390XAND, t) v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t) v3.AuxInt = 1 v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XNOT, t) v4.AddArg(x) - v2.AddArg(v4) + v2.AddArg2(v3, v4) v1.AddArg(v2) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1238,10 +1209,9 @@ func rewriteValueS390X_OpDiv16(v 
*Value) bool { v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1258,10 +1228,9 @@ func rewriteValueS390X_OpDiv16u(v *Value) bool { v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1278,8 +1247,7 @@ func rewriteValueS390X_OpDiv32(v *Value) bool { v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1296,8 +1264,7 @@ func rewriteValueS390X_OpDiv32u(v *Value) bool { v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -1314,10 +1281,9 @@ func rewriteValueS390X_OpDiv8(v *Value) bool { v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1334,10 +1300,9 @@ func rewriteValueS390X_OpDiv8u(v *Value) bool { v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1355,18 +1320,15 @@ func rewriteValueS390X_OpEq16(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) 
v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1384,14 +1346,11 @@ func rewriteValueS390X_OpEq32(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1409,14 +1368,11 @@ func rewriteValueS390X_OpEq32F(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1434,14 +1390,11 @@ func rewriteValueS390X_OpEq64(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1459,14 +1412,11 @@ func rewriteValueS390X_OpEq64F(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1484,18 +1434,15 @@ func rewriteValueS390X_OpEq8(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, 
typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1513,18 +1460,15 @@ func rewriteValueS390X_OpEqB(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1542,14 +1486,11 @@ func rewriteValueS390X_OpEqPtr(v *Value) bool { v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1564,9 +1505,7 @@ func rewriteValueS390X_OpFMA(v *Value) bool { y := v_1 z := v_2 v.reset(OpS390XFMADD) - v.AddArg(z) - v.AddArg(x) - v.AddArg(y) + v.AddArg3(z, x, y) return true } } @@ -1596,14 +1535,11 @@ func rewriteValueS390X_OpGeq32F(v *Value) bool { v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1621,14 +1557,11 @@ func rewriteValueS390X_OpGeq64F(v *Value) bool { v.Aux 
= s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1646,14 +1579,11 @@ func rewriteValueS390X_OpGreater32F(v *Value) bool { v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1671,14 +1601,11 @@ func rewriteValueS390X_OpGreater64F(v *Value) bool { v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1697,10 +1624,9 @@ func rewriteValueS390X_OpHmul32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1720,10 +1646,9 @@ func rewriteValueS390X_OpHmul32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v2.AddArg(y) - v0.AddArg(v2) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1739,8 +1664,7 @@ func rewriteValueS390X_OpITab(v *Value) bool { mem := v_0.Args[1] ptr := v_0.Args[0] 
v.reset(OpS390XMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1759,14 +1683,11 @@ func rewriteValueS390X_OpIsInBounds(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(idx) - v2.AddArg(len) - v.AddArg(v2) + v2.AddArg2(idx, len) + v.AddArg3(v0, v1, v2) return true } } @@ -1782,14 +1703,12 @@ func rewriteValueS390X_OpIsNonNil(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags) v2.AuxInt = 0 v2.AddArg(p) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -1807,14 +1726,11 @@ func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(idx) - v2.AddArg(len) - v.AddArg(v2) + v2.AddArg2(idx, len) + v.AddArg3(v0, v1, v2) return true } } @@ -1832,18 +1748,15 @@ func rewriteValueS390X_OpLeq16(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1861,18 +1774,15 @@ func 
rewriteValueS390X_OpLeq16U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -1890,14 +1800,11 @@ func rewriteValueS390X_OpLeq32(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1915,14 +1822,11 @@ func rewriteValueS390X_OpLeq32F(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1940,14 +1844,11 @@ func rewriteValueS390X_OpLeq32U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1965,14 +1866,11 @@ func rewriteValueS390X_OpLeq64(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 
:= b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -1990,14 +1888,11 @@ func rewriteValueS390X_OpLeq64F(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2015,14 +1910,11 @@ func rewriteValueS390X_OpLeq64U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2040,18 +1932,15 @@ func rewriteValueS390X_OpLeq8(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2069,18 +1958,15 @@ func rewriteValueS390X_OpLeq8U(v *Value) bool { v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, 
OpS390XMOVBZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2098,18 +1984,15 @@ func rewriteValueS390X_OpLess16(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2127,18 +2010,15 @@ func rewriteValueS390X_OpLess16U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2156,14 +2036,11 @@ func rewriteValueS390X_OpLess32(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2181,14 +2058,11 @@ func rewriteValueS390X_OpLess32F(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - 
v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2206,14 +2080,11 @@ func rewriteValueS390X_OpLess32U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2231,14 +2102,11 @@ func rewriteValueS390X_OpLess64(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2256,14 +2124,11 @@ func rewriteValueS390X_OpLess64F(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2281,14 +2146,11 @@ func rewriteValueS390X_OpLess64U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -2306,18 +2168,15 @@ func rewriteValueS390X_OpLess8(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) 
v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2335,18 +2194,15 @@ func rewriteValueS390X_OpLess8U(v *Value) bool { v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -2364,8 +2220,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2379,8 +2234,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2394,8 +2248,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVWZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2409,8 +2262,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2424,8 +2276,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVHZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2439,8 +2290,7 @@ func rewriteValueS390X_OpLoad(v 
*Value) bool { break } v.reset(OpS390XMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2454,8 +2304,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XMOVBZload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2469,8 +2318,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XFMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -2484,8 +2332,7 @@ func rewriteValueS390X_OpLoad(v *Value) bool { break } v.reset(OpS390XFMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -2518,8 +2365,7 @@ func rewriteValueS390X_OpLsh16x16(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x16 x y) @@ -2532,18 +2378,15 @@ func rewriteValueS390X_OpLsh16x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2562,8 +2405,7 @@ func rewriteValueS390X_OpLsh16x32(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x32 x y) @@ -2576,16 +2418,13 @@ func rewriteValueS390X_OpLsh16x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - 
v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2604,8 +2443,7 @@ func rewriteValueS390X_OpLsh16x64(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x64 x y) @@ -2618,16 +2456,13 @@ func rewriteValueS390X_OpLsh16x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2646,8 +2481,7 @@ func rewriteValueS390X_OpLsh16x8(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh16x8 x y) @@ -2660,18 +2494,15 @@ func rewriteValueS390X_OpLsh16x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2690,8 +2521,7 @@ func rewriteValueS390X_OpLsh32x16(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x16 x y) @@ -2704,18 +2534,15 @@ func rewriteValueS390X_OpLsh32x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, 
typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2734,8 +2561,7 @@ func rewriteValueS390X_OpLsh32x32(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x32 x y) @@ -2748,16 +2574,13 @@ func rewriteValueS390X_OpLsh32x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2776,8 +2599,7 @@ func rewriteValueS390X_OpLsh32x64(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x64 x y) @@ -2790,16 +2612,13 @@ func rewriteValueS390X_OpLsh32x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2818,8 +2637,7 @@ func rewriteValueS390X_OpLsh32x8(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh32x8 x y) @@ -2832,18 +2650,15 @@ func rewriteValueS390X_OpLsh32x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) 
v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2862,8 +2677,7 @@ func rewriteValueS390X_OpLsh64x16(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x16 x y) @@ -2876,18 +2690,15 @@ func rewriteValueS390X_OpLsh64x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2906,8 +2717,7 @@ func rewriteValueS390X_OpLsh64x32(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x32 x y) @@ -2920,16 +2730,13 @@ func rewriteValueS390X_OpLsh64x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -2948,8 +2755,7 @@ func rewriteValueS390X_OpLsh64x64(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x64 x y) @@ -2962,16 +2768,13 @@ func rewriteValueS390X_OpLsh64x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + 
v.AddArg3(v0, v1, v2) return true } } @@ -2990,8 +2793,7 @@ func rewriteValueS390X_OpLsh64x8(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x8 x y) @@ -3004,18 +2806,15 @@ func rewriteValueS390X_OpLsh64x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3034,8 +2833,7 @@ func rewriteValueS390X_OpLsh8x16(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x16 x y) @@ -3048,18 +2846,15 @@ func rewriteValueS390X_OpLsh8x16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3078,8 +2873,7 @@ func rewriteValueS390X_OpLsh8x32(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x32 x y) @@ -3092,16 +2886,13 @@ func rewriteValueS390X_OpLsh8x32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 
v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3120,8 +2911,7 @@ func rewriteValueS390X_OpLsh8x64(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x64 x y) @@ -3134,16 +2924,13 @@ func rewriteValueS390X_OpLsh8x64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3162,8 +2949,7 @@ func rewriteValueS390X_OpLsh8x8(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh8x8 x y) @@ -3176,18 +2962,15 @@ func rewriteValueS390X_OpLsh8x8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSLW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -3204,10 +2987,9 @@ func rewriteValueS390X_OpMod16(v *Value) bool { v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3224,10 +3006,9 @@ func rewriteValueS390X_OpMod16u(v *Value) bool { v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3244,8 +3025,7 @@ 
func rewriteValueS390X_OpMod32(v *Value) bool { v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3262,8 +3042,7 @@ func rewriteValueS390X_OpMod32u(v *Value) bool { v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3280,10 +3059,9 @@ func rewriteValueS390X_OpMod8(v *Value) bool { v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3300,10 +3078,9 @@ func rewriteValueS390X_OpMod8u(v *Value) bool { v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3335,12 +3112,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVBstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -3353,12 +3127,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVHstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -3371,12 +3142,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVWstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, 
mem) return true } // match: (Move [8] dst src mem) @@ -3389,12 +3157,9 @@ func rewriteValueS390X_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpS390XMOVDstore) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -3408,20 +3173,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVDstore) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [24] dst src mem) @@ -3435,29 +3194,20 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVDstore) v.AuxInt = 16 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -3471,20 +3221,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 
v.reset(OpS390XMOVBstore) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -3498,20 +3242,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -3525,20 +3263,14 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVHstore) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -3552,29 +3284,20 @@ func rewriteValueS390X_OpMove(v *Value) bool { mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 6 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - 
v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem) v1.AuxInt = 4 - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) + v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) - v3.AddArg(dst) v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -3590,9 +3313,7 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s, 0) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) @@ -3608,14 +3329,10 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s-256, 256) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v0.AuxInt = makeValAndOff(256, 0) - v0.AddArg(dst) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(dst, src, mem) + v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) @@ -3631,19 +3348,13 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = makeValAndOff(s-512, 512) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v0.AuxInt = makeValAndOff(256, 256) - v0.AddArg(dst) - v0.AddArg(src) v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v1.AuxInt = makeValAndOff(256, 0) - v1.AddArg(dst) - v1.AddArg(src) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg3(dst, src, mem) + v0.AddArg3(dst, src, v1) + v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) @@ -3659,24 +3370,16 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XMVC) v.AuxInt = 
makeValAndOff(s-768, 768) - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v0.AuxInt = makeValAndOff(256, 512) - v0.AddArg(dst) - v0.AddArg(src) v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v1.AuxInt = makeValAndOff(256, 256) - v1.AddArg(dst) - v1.AddArg(src) v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) v2.AuxInt = makeValAndOff(256, 0) - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg3(dst, src, mem) + v1.AddArg3(dst, src, v2) + v0.AddArg3(dst, src, v1) + v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) @@ -3692,15 +3395,11 @@ func rewriteValueS390X_OpMove(v *Value) bool { } v.reset(OpS390XLoweredMove) v.AuxInt = s % 256 - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type) - v0.AddArg(src) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = (s / 256) * 256 - v0.AddArg(v1) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, v1) + v.AddArg4(dst, src, v0, mem) return true } return false @@ -3719,18 +3418,15 @@ func rewriteValueS390X_OpNeq16(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -3748,14 +3444,11 @@ func rewriteValueS390X_OpNeq32(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + 
v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3773,14 +3466,11 @@ func rewriteValueS390X_OpNeq32F(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3798,14 +3488,11 @@ func rewriteValueS390X_OpNeq64(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3823,14 +3510,11 @@ func rewriteValueS390X_OpNeq64F(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3848,18 +3532,15 @@ func rewriteValueS390X_OpNeq8(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -3877,18 +3558,15 @@ func rewriteValueS390X_OpNeqB(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, 
OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v4.AddArg(y) - v2.AddArg(v4) - v.AddArg(v2) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) return true } } @@ -3906,14 +3584,11 @@ func rewriteValueS390X_OpNeqPtr(v *Value) bool { v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 1 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v2.AddArg(x) - v2.AddArg(y) - v.AddArg(v2) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) return true } } @@ -3968,8 +3643,7 @@ func rewriteValueS390X_OpOffPtr(v *Value) bool { v.reset(OpS390XADD) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = off - v.AddArg(v0) - v.AddArg(ptr) + v.AddArg2(v0, ptr) return true } } @@ -3990,9 +3664,7 @@ func rewriteValueS390X_OpPanicBounds(v *Value) bool { } v.reset(OpS390XLoweredPanicBoundsA) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -4008,9 +3680,7 @@ func rewriteValueS390X_OpPanicBounds(v *Value) bool { } v.reset(OpS390XLoweredPanicBoundsB) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } // match: (PanicBounds [kind] x y mem) @@ -4026,9 +3696,7 @@ func rewriteValueS390X_OpPanicBounds(v *Value) bool { } v.reset(OpS390XLoweredPanicBoundsC) v.AuxInt = kind - v.AddArg(x) - v.AddArg(y) - v.AddArg(mem) + v.AddArg3(x, y, mem) return true } return false @@ -4115,17 +3783,14 @@ func rewriteValueS390X_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := 
b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -4146,17 +3811,14 @@ func rewriteValueS390X_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -4202,8 +3864,7 @@ func rewriteValueS390X_OpRsh16Ux16(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux16 x y) @@ -4218,18 +3879,15 @@ func rewriteValueS390X_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4250,8 +3908,7 @@ func rewriteValueS390X_OpRsh16Ux32(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux32 x y) @@ -4266,16 +3923,13 @@ func rewriteValueS390X_OpRsh16Ux32(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4296,8 +3950,7 @@ func rewriteValueS390X_OpRsh16Ux64(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux64 x y) @@ -4312,16 +3965,13 @@ func rewriteValueS390X_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4342,8 +3992,7 @@ func rewriteValueS390X_OpRsh16Ux8(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16Ux8 x y) @@ -4358,18 +4007,15 @@ func rewriteValueS390X_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -4390,8 +4036,7 @@ func rewriteValueS390X_OpRsh16x16(v *Value) bool { 
v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x16 x y) @@ -4402,20 +4047,17 @@ func rewriteValueS390X_OpRsh16x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4436,8 +4078,7 @@ func rewriteValueS390X_OpRsh16x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x32 x y) @@ -4448,18 +4089,15 @@ func rewriteValueS390X_OpRsh16x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4480,8 +4118,7 @@ func rewriteValueS390X_OpRsh16x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x64 x y) @@ -4492,18 +4129,15 @@ func rewriteValueS390X_OpRsh16x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, 
OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4524,8 +4158,7 @@ func rewriteValueS390X_OpRsh16x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh16x8 x y) @@ -4536,20 +4169,17 @@ func rewriteValueS390X_OpRsh16x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -4568,8 +4198,7 @@ func rewriteValueS390X_OpRsh32Ux16(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux16 x y) @@ -4582,18 +4211,15 @@ func rewriteValueS390X_OpRsh32Ux16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4612,8 +4238,7 @@ func rewriteValueS390X_OpRsh32Ux32(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux32 x y) @@ -4626,16 +4251,13 @@ func rewriteValueS390X_OpRsh32Ux32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4654,8 +4276,7 @@ func rewriteValueS390X_OpRsh32Ux64(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux64 x y) @@ -4668,16 +4289,13 @@ func rewriteValueS390X_OpRsh32Ux64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4696,8 +4314,7 @@ func rewriteValueS390X_OpRsh32Ux8(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32Ux8 x y) @@ -4710,18 +4327,15 @@ func rewriteValueS390X_OpRsh32Ux8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRW, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4740,8 +4354,7 @@ func rewriteValueS390X_OpRsh32x16(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, 
y) return true } // match: (Rsh32x16 x y) @@ -4750,20 +4363,17 @@ func rewriteValueS390X_OpRsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4781,8 +4391,7 @@ func rewriteValueS390X_OpRsh32x32(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x32 x y) @@ -4791,18 +4400,15 @@ func rewriteValueS390X_OpRsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4820,8 +4426,7 @@ func rewriteValueS390X_OpRsh32x64(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x64 x y) @@ -4830,18 +4435,15 @@ func rewriteValueS390X_OpRsh32x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4860,8 +4462,7 @@ func rewriteValueS390X_OpRsh32x8(v *Value) bool { 
break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh32x8 x y) @@ -4870,20 +4471,17 @@ func rewriteValueS390X_OpRsh32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -4902,8 +4500,7 @@ func rewriteValueS390X_OpRsh64Ux16(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux16 x y) @@ -4916,18 +4513,15 @@ func rewriteValueS390X_OpRsh64Ux16(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -4946,8 +4540,7 @@ func rewriteValueS390X_OpRsh64Ux32(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux32 x y) @@ -4960,16 +4553,13 @@ func rewriteValueS390X_OpRsh64Ux32(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } 
} @@ -4988,8 +4578,7 @@ func rewriteValueS390X_OpRsh64Ux64(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux64 x y) @@ -5002,16 +4591,13 @@ func rewriteValueS390X_OpRsh64Ux64(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -5030,8 +4616,7 @@ func rewriteValueS390X_OpRsh64Ux8(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux8 x y) @@ -5044,18 +4629,15 @@ func rewriteValueS390X_OpRsh64Ux8(v *Value) bool { v.Type = t v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XSRD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } } @@ -5074,8 +4656,7 @@ func rewriteValueS390X_OpRsh64x16(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x16 x y) @@ -5084,20 +4665,17 @@ func rewriteValueS390X_OpRsh64x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) 
- v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5115,8 +4693,7 @@ func rewriteValueS390X_OpRsh64x32(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x32 x y) @@ -5125,18 +4702,15 @@ func rewriteValueS390X_OpRsh64x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5154,8 +4728,7 @@ func rewriteValueS390X_OpRsh64x64(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x64 x y) @@ -5164,18 +4737,15 @@ func rewriteValueS390X_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v2.AuxInt = 64 v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5194,8 +4764,7 @@ func rewriteValueS390X_OpRsh64x8(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x8 x y) @@ -5204,20 +4773,17 @@ func rewriteValueS390X_OpRsh64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v0.Aux = s390x.GreaterOrEqual - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v2.AuxInt = 64 v3 := b.NewValue0(v.Pos, 
OpS390XMOVBZreg, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -5238,8 +4804,7 @@ func rewriteValueS390X_OpRsh8Ux16(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux16 x y) @@ -5254,18 +4819,15 @@ func rewriteValueS390X_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5286,8 +4848,7 @@ func rewriteValueS390X_OpRsh8Ux32(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux32 x y) @@ -5302,16 +4863,13 @@ func rewriteValueS390X_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5332,8 +4890,7 @@ func rewriteValueS390X_OpRsh8Ux64(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux64 x y) @@ -5348,16 +4905,13 @@ func rewriteValueS390X_OpRsh8Ux64(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5378,8 +4932,7 @@ func rewriteValueS390X_OpRsh8Ux8(v *Value) bool { v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8Ux8 x y) @@ -5394,18 +4947,15 @@ func rewriteValueS390X_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRW, t) v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v.AddArg(v3) + v.AddArg3(v0, v2, v3) return true } } @@ -5426,8 +4976,7 @@ func rewriteValueS390X_OpRsh8x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x16 x y) @@ -5438,20 +4987,17 @@ func rewriteValueS390X_OpRsh8x16(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + 
v.AddArg2(v0, v1) return true } } @@ -5472,8 +5018,7 @@ func rewriteValueS390X_OpRsh8x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x32 x y) @@ -5484,18 +5029,15 @@ func rewriteValueS390X_OpRsh8x32(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5516,8 +5058,7 @@ func rewriteValueS390X_OpRsh8x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x64 x y) @@ -5528,18 +5069,15 @@ func rewriteValueS390X_OpRsh8x64(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) v3.AuxInt = 64 v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5560,8 +5098,7 @@ func rewriteValueS390X_OpRsh8x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } // match: (Rsh8x8 x y) @@ -5572,20 +5109,17 @@ func rewriteValueS390X_OpRsh8x8(v *Value) bool { v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 
:= b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) v1.Aux = s390x.GreaterOrEqual - v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) v2.AuxInt = 63 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v3.AuxInt = 64 v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v1.AddArg(v3) - v.AddArg(v1) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) return true } } @@ -5654,8 +5188,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) + v.AddArg2(ptr, idx) return true } break @@ -5670,8 +5203,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSUB) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -5698,9 +5230,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -5745,8 +5275,7 @@ func rewriteValueS390X_OpS390XADDE(v *Value) bool { break } v.reset(OpS390XADDC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDE x y (FlagLT)) @@ -5758,8 +5287,7 @@ func rewriteValueS390X_OpS390XADDE(v *Value) bool { break } v.reset(OpS390XADDC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c))))) @@ -5792,9 +5320,7 @@ func rewriteValueS390X_OpS390XADDE(v *Value) bool { break } v.reset(OpS390XADDE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.AddArg3(x, y, c) return true } return false @@ -5852,8 +5378,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSUBW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } break @@ -5880,9 +5405,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, 
ptr, mem) return true } break @@ -5909,9 +5432,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -5985,9 +5506,7 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool { v.reset(OpS390XADDWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6010,9 +5529,7 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool { v.reset(OpS390XADDWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6077,8 +5594,7 @@ func rewriteValueS390X_OpS390XADDconst(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (ADDconst [0] x) @@ -6149,10 +5665,9 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { break } v.reset(OpS390XADD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -6174,9 +5689,7 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { v.reset(OpS390XADDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6199,9 +5712,7 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { v.reset(OpS390XADDload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6365,9 +5876,7 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ 
-6427,9 +5936,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -6456,9 +5963,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -6566,9 +6071,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool { v.reset(OpS390XANDWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6591,9 +6094,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool { v.reset(OpS390XANDWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6674,10 +6175,9 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { break } v.reset(OpS390XAND) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -6699,9 +6199,7 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { v.reset(OpS390XANDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -6724,9 +6222,7 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { v.reset(OpS390XANDload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -6782,8 +6278,7 @@ func rewriteValueS390X_OpS390XCMP(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -6840,8 +6335,7 @@ 
func rewriteValueS390X_OpS390XCMPU(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7061,8 +6555,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7075,8 +6568,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW x (MOVWZreg y)) @@ -7088,8 +6580,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW (MOVWreg x) y) @@ -7101,8 +6592,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPW (MOVWZreg x) y) @@ -7114,8 +6604,7 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -7163,8 +6652,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -7177,8 +6665,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU x (MOVWZreg y)) @@ -7190,8 +6677,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU (MOVWreg x) y) @@ -7203,8 +6689,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { x := 
v_0.Args[0] y := v_1 v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (CMPWU (MOVWZreg x) y) @@ -7216,8 +6701,7 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { x := v_0.Args[0] y := v_1 v.reset(OpS390XCMPWU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -7734,9 +7218,7 @@ func rewriteValueS390X_OpS390XFADD(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -7757,9 +7239,7 @@ func rewriteValueS390X_OpS390XFADDS(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } break @@ -7828,8 +7308,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -7851,8 +7330,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -7875,9 +7353,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) @@ -7902,9 +7378,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -7934,9 +7408,7 @@ func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = c + d v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) @@ -7958,9 +7430,7 @@ func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool { v.reset(OpS390XFMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -7988,9 +7458,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -8013,9 +7481,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -8039,10 +7505,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -8068,10 +7531,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -8103,10 +7563,7 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) @@ -8129,10 +7586,7 @@ func 
rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { v.reset(OpS390XFMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -8179,8 +7633,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -8202,8 +7655,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -8226,9 +7678,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSload [off] {sym} (ADD ptr idx) mem) @@ -8253,9 +7703,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -8285,9 +7733,7 @@ func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) @@ -8309,9 +7755,7 @@ func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool { v.reset(OpS390XFMOVSloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } return false @@ -8339,9 +7783,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstore) v.AuxInt 
= off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -8364,9 +7806,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -8390,10 +7830,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) @@ -8419,10 +7856,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -8454,10 +7888,7 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) @@ -8480,10 +7911,7 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool { v.reset(OpS390XFMOVSstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } return false @@ -8553,9 +7981,7 @@ func rewriteValueS390X_OpS390XFSUB(v *Value) bool { y := v_0.Args[0] x := v_1 v.reset(OpS390XFMSUB) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -8573,9 +7999,7 @@ func rewriteValueS390X_OpS390XFSUBS(v *Value) bool { y := v_0.Args[0] x := 
v_1 v.reset(OpS390XFMSUBS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) + v.AddArg3(x, y, z) return true } return false @@ -8676,8 +8100,7 @@ func rewriteValueS390X_OpS390XLDGR(v *Value) bool { v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1) v2.AuxInt = off v2.Aux = sym - v2.AddArg(ptr) - v2.AddArg(mem) + v2.AddArg2(ptr, mem) v1.AddArg(v2) v0.AddArg(v1) return true @@ -8762,9 +8185,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { cmp := v_2.Args[0] v.reset(OpS390XLOCGR) v.Aux = c.(s390x.CCMask).ReverseComparison() - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) + v.AddArg3(x, y, cmp) return true } // match: (LOCGR {c} _ x (FlagEQ)) @@ -8954,8 +8375,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -8977,8 +8397,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -9001,9 +8420,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBZload [off] {sym} (ADD ptr idx) mem) @@ -9028,9 +8445,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9061,9 +8476,7 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9088,9 
+8501,7 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9253,8 +8664,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVBZreg x:(MOVBloadidx [o] {s} p i mem)) @@ -9280,9 +8690,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVBZreg x:(Arg )) @@ -9398,8 +8806,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -9421,8 +8828,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -9445,9 +8851,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off] {sym} (ADD ptr idx) mem) @@ -9472,9 +8876,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9505,9 +8907,7 @@ func rewriteValueS390X_OpS390XMOVBloadidx(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, 
mem) return true } break @@ -9532,9 +8932,7 @@ func rewriteValueS390X_OpS390XMOVBloadidx(v *Value) bool { v.reset(OpS390XMOVBloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -9697,8 +9095,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVBreg x:(MOVBZloadidx [o] {s} p i mem)) @@ -9724,9 +9121,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVBreg x:(Arg )) @@ -9796,9 +9191,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) @@ -9815,9 +9208,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -9839,9 +9230,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -9862,8 +9251,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(int64(int8(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -9886,9 +9274,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstore) v.AuxInt = off1 + 
off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -9912,10 +9298,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) @@ -9941,10 +9324,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -9972,9 +9352,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem)) @@ -10005,9 +9383,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem)) @@ -10033,9 +9409,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem)) @@ -10066,9 +9440,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p 
w mem)) @@ -10093,9 +9465,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem)) @@ -10125,9 +9495,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -10152,9 +9520,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem)) @@ -10184,9 +9550,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.reset(OpS390XMOVHBRstore) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -10212,8 +9576,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v.reset(OpS390XMOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -10235,8 +9598,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v.reset(OpS390XMOVBstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) @@ -10261,8 +9623,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xff|ValAndOff(a).Val()<<8, ValAndOff(a).Off()) 
v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.AddArg2(p, mem) return true } return false @@ -10293,10 +9654,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -10322,10 +9680,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -10358,10 +9713,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10400,10 +9752,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10437,10 +9786,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10479,10 +9825,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10515,10 +9858,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10556,10 +9896,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - 
v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10592,10 +9929,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -10633,10 +9967,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -10665,8 +9996,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) @@ -10687,8 +10017,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = c + d v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) @@ -10710,8 +10039,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) @@ -10733,8 +10061,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v.reset(OpS390XMOVDaddridx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -10801,8 +10128,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.reset(OpS390XMOVDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -10825,8 +10151,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { 
v.reset(OpS390XMOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -10849,9 +10174,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off] {sym} (ADD ptr idx) mem) @@ -10876,9 +10199,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10909,9 +10230,7 @@ func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10936,9 +10255,7 @@ func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -10968,9 +10285,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -10991,8 +10306,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstoreconst) v.AuxInt = makeValAndOff(c, off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -11016,9 +10330,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - 
v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -11042,10 +10354,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) @@ -11071,10 +10380,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11102,10 +10408,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XSTMG2) v.AuxInt = i - 8 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(mem) + v.AddArg4(p, w0, w1, mem) return true } // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) @@ -11132,11 +10435,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XSTMG3) v.AuxInt = i - 16 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(mem) + v.AddArg5(p, w0, w1, w2, mem) return true } // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) @@ -11164,12 +10463,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.reset(OpS390XSTMG4) v.AuxInt = i - 24 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } return false @@ -11195,8 +10489,7 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { v.reset(OpS390XMOVDstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -11218,8 +10511,7 @@ func 
rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { v.reset(OpS390XMOVDstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -11250,10 +10542,7 @@ func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11279,10 +10568,7 @@ func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -11315,9 +10601,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem)) @@ -11347,9 +10631,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) @@ -11374,9 +10656,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem)) @@ -11406,9 +10686,7 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v.reset(OpS390XMOVWBRstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -11445,10 +10723,7 @@ func 
rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -11486,10 +10761,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -11522,10 +10794,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -11563,10 +10832,7 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { v.reset(OpS390XMOVWBRstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -11615,8 +10881,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -11639,8 +10904,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -11663,9 +10927,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHZload [off] {sym} (ADD ptr idx) mem) @@ -11690,9 +10952,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = off v.Aux = sym - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -11723,9 +10983,7 @@ func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -11750,9 +11008,7 @@ func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -11933,8 +11189,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVHZreg x:(MOVHloadidx [o] {s} p i mem)) @@ -11960,9 +11215,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVHZreg x:(Arg )) @@ -12051,8 +11304,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -12075,8 +11327,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -12099,9 +11350,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off] {sym} (ADD ptr idx) mem) @@ -12126,9 +11375,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { 
v.reset(OpS390XMOVHloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12159,9 +11406,7 @@ func rewriteValueS390X_OpS390XMOVHloadidx(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12186,9 +11431,7 @@ func rewriteValueS390X_OpS390XMOVHloadidx(v *Value) bool { v.reset(OpS390XMOVHloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -12403,8 +11646,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVHreg x:(MOVHZloadidx [o] {s} p i mem)) @@ -12430,9 +11672,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVHreg x:(Arg )) @@ -12502,9 +11742,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) @@ -12521,9 +11759,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -12545,9 +11781,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -12568,8 +11802,7 @@ func 
rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(int64(int16(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -12593,9 +11826,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -12619,10 +11850,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) @@ -12648,10 +11876,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12679,9 +11904,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem)) @@ -12712,9 +11935,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem)) @@ -12740,9 +11961,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + 
v.AddArg3(p, w, mem) return true } // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) @@ -12773,9 +11992,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -12803,8 +12020,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -12826,8 +12042,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { v.reset(OpS390XMOVHstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) @@ -12852,11 +12067,9 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16)) - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -12887,10 +12100,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12916,10 +12126,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -12952,10 +12159,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) 
v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -12994,10 +12198,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -13031,10 +12232,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -13073,10 +12271,7 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = i - 2 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -13110,9 +12305,7 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { v.reset(OpS390XMOVDBRstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem)) @@ -13142,9 +12335,7 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { v.reset(OpS390XMOVDBRstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } return false @@ -13181,10 +12372,7 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { v.reset(OpS390XMOVDBRstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -13222,10 +12410,7 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { v.reset(OpS390XMOVDBRstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -13274,8 +12459,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v 
*Value) bool { v.reset(OpS390XMOVWZload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -13298,8 +12482,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -13322,9 +12505,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWZload [off] {sym} (ADD ptr idx) mem) @@ -13349,9 +12530,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13382,9 +12561,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13409,9 +12586,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13609,8 +12784,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVWZreg x:(MOVWloadidx [o] {s} p i mem)) @@ -13636,9 +12810,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, mem) return true } // match: (MOVWZreg x:(Arg )) 
@@ -13712,8 +12884,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) @@ -13736,8 +12907,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) @@ -13760,9 +12930,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } // match: (MOVWload [off] {sym} (ADD ptr idx) mem) @@ -13787,9 +12955,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13820,9 +12986,7 @@ func rewriteValueS390X_OpS390XMOVWloadidx(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -13847,9 +13011,7 @@ func rewriteValueS390X_OpS390XMOVWloadidx(v *Value) bool { v.reset(OpS390XMOVWloadidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg3(ptr, idx, mem) return true } break @@ -14115,8 +13277,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } // match: (MOVWreg x:(MOVWZloadidx [o] {s} p i mem)) @@ -14142,9 +13303,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { v.AddArg(v0) v0.AuxInt = o v0.Aux = s - v0.AddArg(p) - v0.AddArg(i) - v0.AddArg(mem) + v0.AddArg3(p, i, 
mem) return true } // match: (MOVWreg x:(Arg )) @@ -14195,9 +13354,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) @@ -14214,9 +13371,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -14238,9 +13393,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -14261,8 +13414,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstoreconst) v.AuxInt = makeValAndOff(int64(int32(c)), off) v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) @@ -14286,9 +13438,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) @@ -14312,10 +13462,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) @@ -14341,10 +13488,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = off v.Aux 
= sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -14371,9 +13515,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.AddArg3(p, w, mem) return true } // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem)) @@ -14404,9 +13546,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg3(p, w0, mem) return true } // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) @@ -14432,10 +13572,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XSTM2) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(mem) + v.AddArg4(p, w0, w1, mem) return true } // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) @@ -14462,11 +13599,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XSTM3) v.AuxInt = i - 8 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(mem) + v.AddArg5(p, w0, w1, w2, mem) return true } // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) @@ -14494,12 +13627,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.reset(OpS390XSTM4) v.AuxInt = i - 12 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } return false @@ -14527,8 +13655,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { v.reset(OpS390XMOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) @@ -14550,8 +13677,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v 
*Value) bool { v.reset(OpS390XMOVWstoreconst) v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) @@ -14576,11 +13702,9 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = ValAndOff(a).Off() v.Aux = s - v.AddArg(p) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(p, v0, mem) return true } return false @@ -14611,10 +13735,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -14640,10 +13761,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg4(ptr, idx, val, mem) return true } break @@ -14676,10 +13794,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) + v.AddArg4(p, idx, w, mem) return true } } @@ -14718,10 +13833,7 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v.reset(OpS390XMOVDstoreidx) v.AuxInt = i - 4 v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) + v.AddArg4(p, idx, w0, mem) return true } } @@ -14774,9 +13886,7 @@ func rewriteValueS390X_OpS390XMULLD(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -14846,8 +13956,7 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v0.AuxInt = log2(c + 1) 
v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLDconst [c] x) @@ -14863,8 +13972,7 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLDconst [c] (MOVDconst [d])) @@ -14905,10 +14013,9 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { break } v.reset(OpS390XMULLD) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -14930,9 +14037,7 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { v.reset(OpS390XMULLDload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -14955,9 +14060,7 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { v.reset(OpS390XMULLDload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -15003,9 +14106,7 @@ func rewriteValueS390X_OpS390XMULLW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -15032,9 +14133,7 @@ func rewriteValueS390X_OpS390XMULLW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -15104,8 +14203,7 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v0.AuxInt = log2(c + 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLWconst [c] x) @@ -15121,8 +14219,7 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { v0 := b.NewValue0(v.Pos, 
OpS390XSLWconst, v.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (MULLWconst [c] (MOVDconst [d])) @@ -15162,9 +14259,7 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool { v.reset(OpS390XMULLWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -15187,9 +14282,7 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool { v.reset(OpS390XMULLWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -15256,8 +14349,7 @@ func rewriteValueS390X_OpS390XNOT(v *Value) bool { v.reset(OpS390XXOR) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = -1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -15367,8 +14459,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { y := v_1_0.Args[0] v.reset(OpS390XLGDR) v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15402,8 +14493,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15435,8 +14525,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { y := v_1_0.Args[0] v.reset(OpS390XLGDR) v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) + v0.AddArg2(y, x) v.AddArg(v0) return true } @@ -15470,8 +14559,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -15530,9 +14618,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) 
bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -15572,8 +14658,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -15613,8 +14698,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -15654,8 +14738,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -15716,11 +14799,9 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -15782,11 +14863,9 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -15834,9 +14913,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -15885,9 +14962,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -15936,9 +15011,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -16008,12 +15081,9 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v2 := b.NewValue0(v.Pos, 
OpS390XMOVHZloadidx, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16084,12 +15154,9 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16133,8 +15200,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -16184,8 +15250,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -16234,8 +15299,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -16297,12 +15361,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16373,12 +15435,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16427,9 +15487,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - 
v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -16488,9 +15546,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ -16548,9 +15604,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -16621,13 +15675,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16707,13 +15758,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -16802,9 +15850,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -16831,9 +15877,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -16873,8 +15917,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -16914,8 +15957,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -16976,11 +16018,9 
@@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) + v2.AddArg2(p, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17028,9 +16068,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -17079,9 +16117,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -17151,12 +16187,9 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) v2.AuxInt = i0 v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) + v2.AddArg3(p, idx, mem) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17200,8 +16233,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) + v1.AddArg2(p, mem) v0.AddArg(v1) return true } @@ -17250,8 +16282,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v0.AddArg2(p, mem) return true } break @@ -17313,12 +16344,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) + v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17367,9 +16396,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v1.AuxInt = i0 v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) + v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true } @@ 
-17427,9 +16454,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v.AddArg(v0) v0.AuxInt = i0 v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v0.AddArg3(p, idx, mem) return true } } @@ -17500,13 +16525,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v3.AuxInt = i0 v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) + v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AddArg2(v1, y) return true } } @@ -17581,9 +16603,7 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool { v.reset(OpS390XORWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -17606,9 +16626,7 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool { v.reset(OpS390XORWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -17675,10 +16693,9 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { break } v.reset(OpS390XOR) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -17700,9 +16717,7 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { v.reset(OpS390XORload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -17725,9 +16740,7 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { v.reset(OpS390XORload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -17803,11 +16816,10 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSLD) - 
v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -17826,8 +16838,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { break } v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVWreg y)) @@ -17839,8 +16850,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVHreg y)) @@ -17852,8 +16862,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVBreg y)) @@ -17865,8 +16874,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVWZreg y)) @@ -17878,8 +16886,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVHZreg y)) @@ -17891,8 +16898,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLD x (MOVBZreg y)) @@ -17904,8 +16910,7 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -17945,11 +16950,10 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSLW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -17968,8 +16972,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { break } v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } 
// match: (SLW x (MOVWreg y)) @@ -17981,8 +16984,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVHreg y)) @@ -17994,8 +16996,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVBreg y)) @@ -18007,8 +17008,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVWZreg y)) @@ -18020,8 +17020,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVHZreg y)) @@ -18033,8 +17032,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SLW x (MOVBZreg y)) @@ -18046,8 +17044,7 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSLW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18087,11 +17084,10 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRAD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18110,8 +17106,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { break } v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVWreg y)) @@ -18123,8 +17118,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVHreg y)) @@ -18136,8 +17130,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool 
{ } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVBreg y)) @@ -18149,8 +17142,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVWZreg y)) @@ -18162,8 +17154,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVHZreg y)) @@ -18175,8 +17166,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAD x (MOVBZreg y)) @@ -18188,8 +17178,7 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18245,11 +17234,10 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRAW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18268,8 +17256,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { break } v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVWreg y)) @@ -18281,8 +17268,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVHreg y)) @@ -18294,8 +17280,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVBreg y)) @@ -18307,8 +17292,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, 
y) return true } // match: (SRAW x (MOVWZreg y)) @@ -18320,8 +17304,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVHZreg y)) @@ -18333,8 +17316,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRAW x (MOVBZreg y)) @@ -18346,8 +17328,7 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRAW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18403,11 +17384,10 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRD) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18426,8 +17406,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { break } v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVWreg y)) @@ -18439,8 +17418,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVHreg y)) @@ -18452,8 +17430,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVBreg y)) @@ -18465,8 +17442,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVWZreg y)) @@ -18478,8 +17454,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVHZreg y)) @@ -18491,8 +17466,7 @@ func 
rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRD x (MOVBZreg y)) @@ -18504,8 +17478,7 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRD) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18569,11 +17542,10 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { c := v_1_0.AuxInt y := v_1_1 v.reset(OpS390XSRW) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) v0.AuxInt = c & 63 v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } break @@ -18592,8 +17564,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { break } v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVWreg y)) @@ -18605,8 +17576,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVHreg y)) @@ -18618,8 +17588,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVBreg y)) @@ -18631,8 +17600,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVWZreg y)) @@ -18644,8 +17612,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVHZreg y)) @@ -18657,8 +17624,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SRW x (MOVBZreg y)) @@ -18670,8 +17636,7 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { } y := v_1.Args[0] v.reset(OpS390XSRW) - v.AddArg(x) - 
v.AddArg(y) + v.AddArg2(x, y) return true } return false @@ -18706,12 +17671,7 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool { v.reset(OpS390XSTM4) v.AuxInt = i - 8 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } // match: (STM2 [i] {s} p (SRDconst [32] x) x mem) @@ -18731,9 +17691,7 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool { v.reset(OpS390XMOVDstore) v.AuxInt = i v.Aux = s - v.AddArg(p) - v.AddArg(x) - v.AddArg(mem) + v.AddArg3(p, x, mem) return true } return false @@ -18768,12 +17726,7 @@ func rewriteValueS390X_OpS390XSTMG2(v *Value) bool { v.reset(OpS390XSTMG4) v.AuxInt = i - 16 v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) + v.AddArg6(p, w0, w1, w2, w3, mem) return true } return false @@ -18850,9 +17803,7 @@ func rewriteValueS390X_OpS390XSUB(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -18870,8 +17821,7 @@ func rewriteValueS390X_OpS390XSUBE(v *Value) bool { break } v.reset(OpS390XSUBC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBE x y (FlagOV)) @@ -18883,8 +17833,7 @@ func rewriteValueS390X_OpS390XSUBE(v *Value) bool { break } v.reset(OpS390XSUBC) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c)))))) @@ -18926,9 +17875,7 @@ func rewriteValueS390X_OpS390XSUBE(v *Value) bool { break } v.reset(OpS390XSUBE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.AddArg3(x, y, c) return true } return false @@ -18997,9 +17944,7 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBW x g:(MOVWZload [off] {sym} ptr 
mem)) @@ -19023,9 +17968,7 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19080,9 +18023,7 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool { v.reset(OpS390XSUBWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19105,9 +18046,7 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool { v.reset(OpS390XSUBWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19196,10 +18135,9 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { break } v.reset(OpS390XSUB) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -19221,9 +18159,7 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { v.reset(OpS390XSUBload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19246,9 +18182,7 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { v.reset(OpS390XSUBload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19265,8 +18199,7 @@ func rewriteValueS390X_OpS390XSumBytes2(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8) v0.AuxInt = 8 v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -19283,8 +18216,7 @@ func rewriteValueS390X_OpS390XSumBytes4(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16) v1.AuxInt = 16 v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + 
v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -19302,8 +18234,7 @@ func rewriteValueS390X_OpS390XSumBytes8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32) v1.AuxInt = 32 v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) + v0.AddArg2(v1, x) v.AddArg(v0) return true } @@ -19406,9 +18337,7 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -19491,9 +18420,7 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -19520,9 +18447,7 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.Type = t v.AuxInt = off v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } break @@ -19582,9 +18507,7 @@ func rewriteValueS390X_OpS390XXORWload(v *Value) bool { v.reset(OpS390XXORWload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) @@ -19607,9 +18530,7 @@ func rewriteValueS390X_OpS390XXORWload(v *Value) bool { v.reset(OpS390XXORWload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19666,10 +18587,9 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { break } v.reset(OpS390XXOR) - v.AddArg(x) v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem) @@ -19691,9 +18611,7 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { v.reset(OpS390XXORload) v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) 
mem) @@ -19716,9 +18634,7 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { v.reset(OpS390XXORload) v.AuxInt = o1 + o2 v.Aux = mergeSym(s1, s2) - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg3(x, ptr, mem) return true } return false @@ -19739,14 +18655,12 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AuxInt = -1 v2.AddArg(c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -19762,16 +18676,13 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags)) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 0 - v2.AddArg(v3) - v2.AddArg(c) + v2.AddArg2(v3, c) v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } @@ -19785,10 +18696,9 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpS390XADDW) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } // match: (Select0 (AddTupleFirst64 val tuple)) @@ -19801,10 +18711,9 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { tuple := v_0.Args[1] val := v_0.Args[0] v.reset(OpS390XADD) - v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) v0.AddArg(tuple) - v.AddArg(v0) + v.AddArg2(val, v0) return true } // match: (Select0 (ADDCconst (MOVDconst [c]) [d])) @@ -19864,22 +18773,18 @@ func rewriteValueS390X_OpSelect1(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v1.AuxInt = 0 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v4.AddArg(x) - v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v6 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags)) v6.AuxInt = -1 v6.AddArg(c) v5.AddArg(v6) - v4.AddArg(v5) + v4.AddArg3(x, y, v5) v3.AddArg(v4) - v0.AddArg(v3) + v0.AddArg3(v1, v2, v3) v.AddArg(v0) return true } @@ -19897,24 +18802,19 @@ func rewriteValueS390X_OpSelect1(v *Value) bool { v1 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v3.AuxInt = 0 - v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v5 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) - v5.AddArg(x) - v5.AddArg(y) v6 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v7 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags)) v8 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v8.AuxInt = 0 - v7.AddArg(v8) - v7.AddArg(c) + v7.AddArg2(v8, c) v6.AddArg(v7) - v5.AddArg(v6) + v5.AddArg3(x, y, v6) v4.AddArg(v5) - v1.AddArg(v4) + v1.AddArg3(v2, v3, v4) v0.AddArg(v1) v.AddArg(v0) return true @@ -20061,9 +18961,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XFMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20078,9 +18976,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XFMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) 
+ v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20095,9 +18991,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20112,9 +19006,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20129,9 +19021,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -20146,9 +19036,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { break } v.reset(OpS390XMOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -20191,8 +19079,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) @@ -20205,8 +19092,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVHstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) @@ -20219,8 +19105,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVWstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) @@ -20233,8 +19118,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVDstoreconst) v.AuxInt = 0 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) @@ -20247,12 +19131,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 
v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) @@ -20265,12 +19147,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) @@ -20283,12 +19163,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(0, 4) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) @@ -20301,12 +19179,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { mem := v_1 v.reset(OpS390XMOVWstoreconst) v.AuxInt = makeValAndOff(0, 3) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = 0 - v0.AddArg(destptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) @@ -20321,8 +19197,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { } v.reset(OpS390XCLEAR) v.AuxInt = makeValAndOff(s, 0) - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [s] destptr mem) @@ -20337,12 +19212,10 @@ func rewriteValueS390X_OpZero(v *Value) bool { } v.reset(OpS390XLoweredZero) v.AuxInt = s % 256 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpS390XADDconst, destptr.Type) v0.AuxInt = (s / 256) * 256 v0.AddArg(destptr) - v.AddArg(v0) - 
v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index 90701067ce..81acd7e7c4 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -685,10 +685,9 @@ func rewriteValueWasm_OpBitLen64(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 64 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Clz, typ.Int64) v1.AddArg(x) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -701,10 +700,9 @@ func rewriteValueWasm_OpCom16(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -717,10 +715,9 @@ func rewriteValueWasm_OpCom32(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -733,10 +730,9 @@ func rewriteValueWasm_OpCom64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -749,10 +745,9 @@ func rewriteValueWasm_OpCom8(v *Value) bool { for { x := v_0 v.reset(OpWasmI64Xor) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = -1 - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -775,10 +770,9 @@ func rewriteValueWasm_OpCtz16(v *Value) bool { x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0x10000 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -793,10 +787,9 @@ func rewriteValueWasm_OpCtz32(v *Value) bool { x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) - 
v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0x100000000 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -811,10 +804,9 @@ func rewriteValueWasm_OpCtz8(v *Value) bool { x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0x100 - v0.AddArg(v1) + v0.AddArg2(x, v1) v.AddArg(v0) return true } @@ -892,10 +884,9 @@ func rewriteValueWasm_OpDiv16(v *Value) bool { v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -912,10 +903,9 @@ func rewriteValueWasm_OpDiv16u(v *Value) bool { v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -932,10 +922,9 @@ func rewriteValueWasm_OpDiv32(v *Value) bool { v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -952,10 +941,9 @@ func rewriteValueWasm_OpDiv32u(v *Value) bool { v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -972,10 +960,9 @@ func rewriteValueWasm_OpDiv8(v *Value) bool { v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -992,10 +979,9 @@ func rewriteValueWasm_OpDiv8u(v *Value) bool { v.reset(OpWasmI64DivU) v0 := 
b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1012,10 +998,9 @@ func rewriteValueWasm_OpEq16(v *Value) bool { v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1032,10 +1017,9 @@ func rewriteValueWasm_OpEq32(v *Value) bool { v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1052,10 +1036,9 @@ func rewriteValueWasm_OpEq8(v *Value) bool { v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1087,10 +1070,9 @@ func rewriteValueWasm_OpLeq16(v *Value) bool { v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1107,10 +1089,9 @@ func rewriteValueWasm_OpLeq16U(v *Value) bool { v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1127,10 +1108,9 @@ func rewriteValueWasm_OpLeq32(v *Value) bool { v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1147,10 +1127,9 @@ func rewriteValueWasm_OpLeq32U(v *Value) bool 
{ v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1167,10 +1146,9 @@ func rewriteValueWasm_OpLeq8(v *Value) bool { v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1187,10 +1165,9 @@ func rewriteValueWasm_OpLeq8U(v *Value) bool { v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1207,10 +1184,9 @@ func rewriteValueWasm_OpLess16(v *Value) bool { v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1227,10 +1203,9 @@ func rewriteValueWasm_OpLess16U(v *Value) bool { v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1247,10 +1222,9 @@ func rewriteValueWasm_OpLess32(v *Value) bool { v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1267,10 +1241,9 @@ func rewriteValueWasm_OpLess32U(v *Value) bool { v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1287,10 +1260,9 @@ func 
rewriteValueWasm_OpLess8(v *Value) bool { v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1307,10 +1279,9 @@ func rewriteValueWasm_OpLess8U(v *Value) bool { v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1328,8 +1299,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmF32Load) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1343,8 +1313,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmF64Load) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1358,8 +1327,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1373,8 +1341,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load32U) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1388,8 +1355,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load32S) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1403,8 +1369,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load16U) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1418,8 +1383,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load16S) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1433,8 +1397,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } 
v.reset(OpWasmI64Load8U) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (Load ptr mem) @@ -1448,8 +1411,7 @@ func rewriteValueWasm_OpLoad(v *Value) bool { break } v.reset(OpWasmI64Load8S) - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -1480,10 +1442,9 @@ func rewriteValueWasm_OpLsh16x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1500,10 +1461,9 @@ func rewriteValueWasm_OpLsh16x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1520,10 +1480,9 @@ func rewriteValueWasm_OpLsh16x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1540,10 +1499,9 @@ func rewriteValueWasm_OpLsh32x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1560,10 +1518,9 @@ func rewriteValueWasm_OpLsh32x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1580,10 +1537,9 @@ func rewriteValueWasm_OpLsh32x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1600,10 +1556,9 @@ func rewriteValueWasm_OpLsh64x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, 
v0) return true } } @@ -1620,10 +1575,9 @@ func rewriteValueWasm_OpLsh64x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1642,8 +1596,7 @@ func rewriteValueWasm_OpLsh64x64(v *Value) bool { break } v.reset(OpWasmI64Shl) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Lsh64x64 x (I64Const [c])) @@ -1659,10 +1612,9 @@ func rewriteValueWasm_OpLsh64x64(v *Value) bool { break } v.reset(OpWasmI64Shl) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = c - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x64 x (I64Const [c])) @@ -1687,18 +1639,14 @@ func rewriteValueWasm_OpLsh64x64(v *Value) bool { y := v_1 v.reset(OpWasmSelect) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = 64 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(y, v3) + v.AddArg3(v0, v1, v2) return true } } @@ -1715,10 +1663,9 @@ func rewriteValueWasm_OpLsh64x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1735,10 +1682,9 @@ func rewriteValueWasm_OpLsh8x16(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1755,10 +1701,9 @@ func rewriteValueWasm_OpLsh8x32(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ 
-1775,10 +1720,9 @@ func rewriteValueWasm_OpLsh8x8(v *Value) bool { y := v_1 v.reset(OpLsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -1795,10 +1739,9 @@ func rewriteValueWasm_OpMod16(v *Value) bool { v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1815,10 +1758,9 @@ func rewriteValueWasm_OpMod16u(v *Value) bool { v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1835,10 +1777,9 @@ func rewriteValueWasm_OpMod32(v *Value) bool { v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1855,10 +1796,9 @@ func rewriteValueWasm_OpMod32u(v *Value) bool { v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1875,10 +1815,9 @@ func rewriteValueWasm_OpMod8(v *Value) bool { v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -1895,10 +1834,9 @@ func rewriteValueWasm_OpMod8u(v *Value) bool { v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ 
-1930,12 +1868,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store8) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [2] dst src mem) @@ -1948,12 +1883,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store16) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [4] dst src mem) @@ -1966,12 +1898,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store32) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [8] dst src mem) @@ -1984,12 +1913,9 @@ func rewriteValueWasm_OpMove(v *Value) bool { src := v_1 mem := v_2 v.reset(OpWasmI64Store) - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } // match: (Move [16] dst src mem) @@ -2003,20 +1929,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store) v.AuxInt = 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [3] dst src mem) @@ -2030,20 
+1950,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store8) v.AuxInt = 2 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [5] dst src mem) @@ -2057,20 +1971,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store8) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [6] dst src mem) @@ -2084,20 +1992,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store16) v.AuxInt = 4 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [7] dst src mem) @@ -2111,20 +2013,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { mem := v_2 v.reset(OpWasmI64Store32) v.AuxInt = 3 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, 
typ.UInt32) v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -2140,20 +2036,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { } v.reset(OpWasmI64Store) v.AuxInt = s - 8 - v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(dst) v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } // match: (Move [s] dst src mem) @@ -2172,19 +2062,14 @@ func rewriteValueWasm_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -2203,28 +2088,20 @@ func rewriteValueWasm_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) v0.AuxInt = s % 16 v0.AddArg(dst) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) v1.AuxInt = s % 16 v1.AddArg(src) - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v2.AuxInt = 8 - v2.AddArg(dst) v3 := b.NewValue0(v.Pos, 
OpWasmI64Load, typ.UInt64) v3.AuxInt = 8 - v3.AddArg(src) - v3.AddArg(mem) - v2.AddArg(v3) + v3.AddArg2(src, mem) v4 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v4.AddArg(dst) v5 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) - v5.AddArg(src) - v5.AddArg(mem) - v4.AddArg(v5) - v4.AddArg(mem) - v2.AddArg(v4) - v.AddArg(v2) + v5.AddArg2(src, mem) + v4.AddArg3(dst, v5, mem) + v2.AddArg3(dst, v3, v4) + v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) @@ -2240,9 +2117,7 @@ func rewriteValueWasm_OpMove(v *Value) bool { } v.reset(OpWasmLoweredMove) v.AuxInt = s / 8 - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } return false @@ -2258,8 +2133,7 @@ func rewriteValueWasm_OpNeg16(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2274,8 +2148,7 @@ func rewriteValueWasm_OpNeg32(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2290,8 +2163,7 @@ func rewriteValueWasm_OpNeg64(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2306,8 +2178,7 @@ func rewriteValueWasm_OpNeg8(v *Value) bool { v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2324,10 +2195,9 @@ func rewriteValueWasm_OpNeq16(v *Value) bool { v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2344,10 +2214,9 @@ func rewriteValueWasm_OpNeq32(v *Value) bool { v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, 
OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2364,10 +2233,9 @@ func rewriteValueWasm_OpNeq8(v *Value) bool { v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2432,17 +2300,14 @@ func rewriteValueWasm_OpRotateLeft16(v *Value) bool { c := v_1.AuxInt v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = c & 15 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = -c & 15 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -2463,17 +2328,14 @@ func rewriteValueWasm_OpRotateLeft8(v *Value) bool { c := v_1.AuxInt v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = c & 7 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = -c & 7 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) return true } return false @@ -2493,10 +2355,9 @@ func rewriteValueWasm_OpRsh16Ux16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2515,10 +2376,9 @@ func rewriteValueWasm_OpRsh16Ux32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) 
v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2537,8 +2397,7 @@ func rewriteValueWasm_OpRsh16Ux64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2557,10 +2416,9 @@ func rewriteValueWasm_OpRsh16Ux8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2579,10 +2437,9 @@ func rewriteValueWasm_OpRsh16x16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2601,10 +2458,9 @@ func rewriteValueWasm_OpRsh16x32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2623,8 +2479,7 @@ func rewriteValueWasm_OpRsh16x64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2643,10 +2498,9 @@ func rewriteValueWasm_OpRsh16x8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2665,10 +2519,9 @@ func rewriteValueWasm_OpRsh32Ux16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2687,10 +2540,9 @@ func rewriteValueWasm_OpRsh32Ux32(v *Value) bool { 
v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2709,8 +2561,7 @@ func rewriteValueWasm_OpRsh32Ux64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2729,10 +2580,9 @@ func rewriteValueWasm_OpRsh32Ux8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2751,10 +2601,9 @@ func rewriteValueWasm_OpRsh32x16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2773,10 +2622,9 @@ func rewriteValueWasm_OpRsh32x32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2795,8 +2643,7 @@ func rewriteValueWasm_OpRsh32x64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -2815,10 +2662,9 @@ func rewriteValueWasm_OpRsh32x8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2835,10 +2681,9 @@ func rewriteValueWasm_OpRsh64Ux16(v *Value) bool { y := v_1 v.reset(OpRsh64Ux64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + 
v.AddArg2(x, v0) return true } } @@ -2855,10 +2700,9 @@ func rewriteValueWasm_OpRsh64Ux32(v *Value) bool { y := v_1 v.reset(OpRsh64Ux64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2877,8 +2721,7 @@ func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { break } v.reset(OpWasmI64ShrU) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64Ux64 x (I64Const [c])) @@ -2894,10 +2737,9 @@ func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { break } v.reset(OpWasmI64ShrU) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = c - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (I64Const [c])) @@ -2922,18 +2764,14 @@ func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { y := v_1 v.reset(OpWasmSelect) v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = 64 - v2.AddArg(v3) - v.AddArg(v2) + v2.AddArg2(y, v3) + v.AddArg3(v0, v1, v2) return true } } @@ -2950,10 +2788,9 @@ func rewriteValueWasm_OpRsh64Ux8(v *Value) bool { y := v_1 v.reset(OpRsh64Ux64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2970,10 +2807,9 @@ func rewriteValueWasm_OpRsh64x16(v *Value) bool { y := v_1 v.reset(OpRsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -2990,10 +2826,9 @@ func rewriteValueWasm_OpRsh64x32(v *Value) bool { y := v_1 v.reset(OpRsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, 
v0) return true } } @@ -3012,8 +2847,7 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { break } v.reset(OpWasmI64ShrS) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } // match: (Rsh64x64 x (I64Const [c])) @@ -3029,10 +2863,9 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { break } v.reset(OpWasmI64ShrS) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = c - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x (I64Const [c])) @@ -3048,10 +2881,9 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { break } v.reset(OpWasmI64ShrS) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 63 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 x y) @@ -3060,19 +2892,15 @@ func rewriteValueWasm_OpRsh64x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpWasmI64ShrS) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64) - v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 63 - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) - v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v3.AuxInt = 64 - v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v2.AddArg2(y, v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) return true } } @@ -3089,10 +2917,9 @@ func rewriteValueWasm_OpRsh64x8(v *Value) bool { y := v_1 v.reset(OpRsh64x64) v.AuxInt = c - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -3111,10 +2938,9 @@ func rewriteValueWasm_OpRsh8Ux16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3133,10 +2959,9 @@ func rewriteValueWasm_OpRsh8Ux32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := 
b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3155,8 +2980,7 @@ func rewriteValueWasm_OpRsh8Ux64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3175,10 +2999,9 @@ func rewriteValueWasm_OpRsh8Ux8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3197,10 +3020,9 @@ func rewriteValueWasm_OpRsh8x16(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3219,10 +3041,9 @@ func rewriteValueWasm_OpRsh8x32(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3241,8 +3062,7 @@ func rewriteValueWasm_OpRsh8x64(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v.AddArg2(v0, y) return true } } @@ -3261,10 +3081,9 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool { v.AuxInt = c v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -3303,14 +3122,12 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 48 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, 
OpWasmI64Const, typ.Int64) v2.AuxInt = 48 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3349,14 +3166,12 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 48 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 48 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3395,14 +3210,12 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 32 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 32 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3441,14 +3254,12 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 56 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 56 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3487,14 +3298,12 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 56 - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 56 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3533,14 +3342,12 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 56 - v0.AddArg(v1) - 
v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 56 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3556,12 +3363,10 @@ func rewriteValueWasm_OpSlicemask(v *Value) bool { v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v1.AuxInt = 0 - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(v1, x) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 63 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -3581,9 +3386,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmF64Store) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3598,9 +3401,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmF32Store) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3615,9 +3416,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3632,9 +3431,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store32) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3649,9 +3446,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store16) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) @@ -3666,9 +3461,7 @@ func rewriteValueWasm_OpStore(v *Value) bool { break } v.reset(OpWasmI64Store8) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -3702,10 +3495,9 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmF64Add) - v.AddArg(y) v0 := 
b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -3739,10 +3531,9 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmF64Mul) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -3776,10 +3567,9 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64Add) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } // match: (I64Add x (I64Const [y])) @@ -3862,10 +3652,9 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64And) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -3922,10 +3711,9 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64Eq) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } // match: (I64Eq x (I64Const [0])) @@ -3981,8 +3769,7 @@ func rewriteValueWasm_OpWasmI64Load(v *Value) bool { } v.reset(OpWasmI64Load) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4024,8 +3811,7 @@ func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool { } v.reset(OpWasmI64Load16S) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4051,8 +3837,7 @@ func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool { } v.reset(OpWasmI64Load16U) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4094,8 +3879,7 @@ func 
rewriteValueWasm_OpWasmI64Load32S(v *Value) bool { } v.reset(OpWasmI64Load32S) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4121,8 +3905,7 @@ func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool { } v.reset(OpWasmI64Load32U) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4164,8 +3947,7 @@ func rewriteValueWasm_OpWasmI64Load8S(v *Value) bool { } v.reset(OpWasmI64Load8S) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } return false @@ -4189,8 +3971,7 @@ func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool { } v.reset(OpWasmI64Load8U) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(mem) + v.AddArg2(ptr, mem) return true } // match: (I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) @@ -4242,10 +4023,9 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64Mul) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -4302,10 +4082,9 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64Ne) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } // match: (I64Ne x (I64Const [0])) @@ -4352,10 +4131,9 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64Or) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -4441,9 +4219,7 @@ func rewriteValueWasm_OpWasmI64Store(v *Value) bool { } v.reset(OpWasmI64Store) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4469,9 +4245,7 @@ func rewriteValueWasm_OpWasmI64Store16(v 
*Value) bool { } v.reset(OpWasmI64Store16) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4497,9 +4271,7 @@ func rewriteValueWasm_OpWasmI64Store32(v *Value) bool { } v.reset(OpWasmI64Store32) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4525,9 +4297,7 @@ func rewriteValueWasm_OpWasmI64Store8(v *Value) bool { } v.reset(OpWasmI64Store8) v.AuxInt = off + off2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.AddArg3(ptr, val, mem) return true } return false @@ -4561,10 +4331,9 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool { x := v_0.AuxInt y := v_1 v.reset(OpWasmI64Xor) - v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x - v.AddArg(v0) + v.AddArg2(y, v0) return true } return false @@ -4595,11 +4364,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store8) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [2] destptr mem) @@ -4611,11 +4378,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store16) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [4] destptr mem) @@ -4627,11 +4392,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store32) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [8] destptr mem) @@ -4643,11 +4406,9 @@ func rewriteValueWasm_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpWasmI64Store) - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, 
OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.AddArg3(destptr, v0, mem) return true } // match: (Zero [3] destptr mem) @@ -4660,17 +4421,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store8) v.AuxInt = 2 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [5] destptr mem) @@ -4683,17 +4440,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store8) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [6] destptr mem) @@ -4706,17 +4459,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store16) v.AuxInt = 4 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [7] destptr mem) @@ -4729,17 +4478,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store32) v.AuxInt = 3 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) - 
v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [s] destptr mem) @@ -4757,14 +4502,11 @@ func rewriteValueWasm_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = s % 8 v0.AddArg(destptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [16] destptr mem) @@ -4777,17 +4519,13 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 8 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(destptr, v2, mem) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [24] destptr mem) @@ -4800,24 +4538,18 @@ func rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 16 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v1.AuxInt = 8 - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v3.AddArg(destptr) v4 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(destptr, v4, mem) + v1.AddArg3(destptr, v2, v3) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [32] destptr mem) @@ -4830,31 +4562,23 @@ func 
rewriteValueWasm_OpZero(v *Value) bool { mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 24 - v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v1.AuxInt = 16 - v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v2.AuxInt = 0 - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) v3.AuxInt = 8 - v3.AddArg(destptr) v4 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v4.AuxInt = 0 - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) - v5.AddArg(destptr) v6 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(destptr, v6, mem) + v3.AddArg3(destptr, v4, v5) + v1.AddArg3(destptr, v2, v3) + v.AddArg3(destptr, v0, v1) return true } // match: (Zero [s] destptr mem) @@ -4869,8 +4593,7 @@ func rewriteValueWasm_OpZero(v *Value) bool { } v.reset(OpWasmLoweredZero) v.AuxInt = s / 8 - v.AddArg(destptr) - v.AddArg(mem) + v.AddArg2(destptr, mem) return true } return false @@ -4897,10 +4620,9 @@ func rewriteValueWasm_OpZeroExt16to32(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xffff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -4926,10 +4648,9 @@ func rewriteValueWasm_OpZeroExt16to64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xffff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -4955,10 +4676,9 @@ func rewriteValueWasm_OpZeroExt32to64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xffffffff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -4984,10 +4704,9 @@ func rewriteValueWasm_OpZeroExt8to16(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) 
- v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -5013,10 +4732,9 @@ func rewriteValueWasm_OpZeroExt8to32(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } @@ -5042,10 +4760,9 @@ func rewriteValueWasm_OpZeroExt8to64(v *Value) bool { for { x := v_0 v.reset(OpWasmI64And) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0xff - v.AddArg(v0) + v.AddArg2(x, v0) return true } } diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index 9393cade62..a031fca4ad 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -116,16 +116,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpComplexMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) v2.AuxInt = 4 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -140,16 +137,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpComplexMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) v2.AuxInt = 8 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -164,16 +158,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpStringMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, 
mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Int) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v2.AuxInt = config.PtrSize v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -188,23 +179,18 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpSliceMake) v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo()) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.Int) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v2.AuxInt = config.PtrSize v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) v3 := b.NewValue0(v.Pos, OpLoad, typ.Int) v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v4.AuxInt = 2 * config.PtrSize v4.AddArg(ptr) - v3.AddArg(v4) - v3.AddArg(mem) - v.AddArg(v3) + v3.AddArg2(v4, mem) + v.AddArg3(v0, v1, v3) return true } // match: (Load ptr mem) @@ -219,16 +205,13 @@ func rewriteValuedec_OpLoad(v *Value) bool { } v.reset(OpIMake) v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) v2.AuxInt = config.PtrSize v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } return false @@ -310,14 +293,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) v0.AuxInt = 4 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(imag) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Float32 - v1.AddArg(dst) - v1.AddArg(real) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, real, mem) + v.AddArg3(v0, imag, v1) return true } // match: (Store {t} dst (ComplexMake real imag) mem) @@ -340,14 +319,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) v0.AuxInt = 8 v0.AddArg(dst) - 
v.AddArg(v0) - v.AddArg(imag) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Float64 - v1.AddArg(dst) - v1.AddArg(real) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, real, mem) + v.AddArg3(v0, imag, v1) return true } // match: (Store dst (StringMake ptr len) mem) @@ -365,14 +340,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v0.AuxInt = config.PtrSize v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(len) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.BytePtr - v1.AddArg(dst) - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, ptr, mem) + v.AddArg3(v0, len, v1) return true } // match: (Store dst (SliceMake ptr len cap) mem) @@ -391,22 +362,16 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v0.AuxInt = 2 * config.PtrSize v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(cap) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Int v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) v2.AuxInt = config.PtrSize v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(len) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = typ.BytePtr - v3.AddArg(dst) - v3.AddArg(ptr) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(dst, ptr, mem) + v1.AddArg3(v2, len, v3) + v.AddArg3(v0, cap, v1) return true } // match: (Store dst (IMake itab data) mem) @@ -424,14 +389,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) v0.AuxInt = config.PtrSize v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(data) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = typ.Uintptr - v1.AddArg(dst) - v1.AddArg(itab) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, itab, mem) + v.AddArg3(v0, data, v1) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 96a23afd8d..3beaf8e99f 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go 
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -133,31 +133,26 @@ func rewriteValuedec64_OpAdd64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) + v4.AddArg2(v5, v6) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) v8 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(x) - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v10.AddArg(y) - v8.AddArg(v10) + v8.AddArg2(v9, v10) v7.AddArg(v8) - v.AddArg(v7) + v.AddArg2(v0, v7) return true } } @@ -175,19 +170,16 @@ func rewriteValuedec64_OpAnd64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -208,11 +200,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Int32) v0.AuxInt = off + 4 v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -228,11 +219,10 @@ func 
rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v0.AuxInt = off + 4 v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -248,11 +238,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Int32) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off + 4 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -268,11 +257,10 @@ func rewriteValuedec64_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) v1.AuxInt = off + 4 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } return false @@ -291,19 +279,17 @@ func rewriteValuedec64_OpBitLen64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int) v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(x) v5.AddArg(v6) - v3.AddArg(v5) + v3.AddArg2(v4, v5) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -320,12 +306,11 @@ func rewriteValuedec64_OpBswap64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v3.AddArg(x) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -342,12 +327,11 @@ func rewriteValuedec64_OpCom64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) v3 := b.NewValue0(v.Pos, 
OpInt64Lo, typ.UInt32) v3.AddArg(x) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } } @@ -366,10 +350,9 @@ func rewriteValuedec64_OpConst64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpConst32, typ.Int32) v0.AuxInt = c >> 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(c)) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Const64 [c]) @@ -384,10 +367,9 @@ func rewriteValuedec64_OpConst64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v0.AuxInt = c >> 32 - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(c)) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } return false @@ -406,7 +388,6 @@ func rewriteValuedec64_OpCtz64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) @@ -414,13 +395,12 @@ func rewriteValuedec64_OpCtz64(v *Value) bool { v5.AddArg(x) v4.AddArg(v5) v3.AddArg(v4) - v2.AddArg(v3) v6 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32) v7 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v7.AddArg(x) v6.AddArg(v7) - v2.AddArg(v6) - v.AddArg(v2) + v2.AddArg2(v3, v6) + v.AddArg2(v0, v2) return true } } @@ -438,19 +418,16 @@ func rewriteValuedec64_OpEq64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -501,29 +478,24 @@ func rewriteValuedec64_OpLeq64(v 
*Value) bool { v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -541,29 +513,24 @@ func rewriteValuedec64_OpLeq64U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -581,29 +548,24 @@ func rewriteValuedec64_OpLess64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - 
v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -621,29 +583,24 @@ func rewriteValuedec64_OpLess64U(v *Value) bool { v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) - v3.AddArg(v4) + v4.AddArg2(v5, v6) v7 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v8.AddArg(x) - v7.AddArg(v8) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(y) - v7.AddArg(v9) - v3.AddArg(v7) - v.AddArg(v3) + v7.AddArg2(v8, v9) + v3.AddArg2(v4, v7) + v.AddArg2(v0, v3) return true } } @@ -668,13 +625,10 @@ func rewriteValuedec64_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr) v1.AuxInt = 4 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) - v2.AddArg(ptr) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(ptr, mem) + v.AddArg2(v0, v2) return true } // match: (Load ptr mem) @@ -692,13 +646,10 @@ func rewriteValuedec64_OpLoad(v *Value) bool { v1 := 
b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) v1.AuxInt = 4 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) - v2.AddArg(ptr) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(ptr, mem) + v.AddArg2(v0, v2) return true } // match: (Load ptr mem) @@ -713,16 +664,13 @@ func rewriteValuedec64_OpLoad(v *Value) bool { } v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) v2.AuxInt = 4 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } // match: (Load ptr mem) @@ -737,16 +685,13 @@ func rewriteValuedec64_OpLoad(v *Value) bool { } v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(ptr, mem) v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) v2.AuxInt = 4 v2.AddArg(ptr) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) return true } return false @@ -791,8 +736,7 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool { } v.reset(OpLsh16x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh16x64 x (Int64Make hi lo)) @@ -809,13 +753,11 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool { break } v.reset(OpLsh16x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -860,8 +802,7 @@ func rewriteValuedec64_OpLsh32x64(v *Value) bool { } v.reset(OpLsh32x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh32x64 x (Int64Make hi lo)) @@ -878,13 
+819,11 @@ func rewriteValuedec64_OpLsh32x64(v *Value) bool { break } v.reset(OpLsh32x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -907,33 +846,24 @@ func rewriteValuedec64_OpLsh64x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v2.AddArg(hi) - v2.AddArg(s) - v1.AddArg(v2) + v2.AddArg2(hi, s) v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v3.AddArg(lo) v4 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v5.AuxInt = 32 - v4.AddArg(v5) - v4.AddArg(s) - v3.AddArg(v4) - v1.AddArg(v3) - v0.AddArg(v1) + v4.AddArg2(v5, s) + v3.AddArg2(lo, v4) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v6.AddArg(lo) v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) - v7.AddArg(s) v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v8.AuxInt = 32 - v7.AddArg(v8) - v6.AddArg(v7) - v0.AddArg(v6) - v.AddArg(v0) + v7.AddArg2(s, v8) + v6.AddArg2(lo, v7) + v0.AddArg2(v1, v6) v9 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v9.AddArg(lo) - v9.AddArg(s) - v.AddArg(v9) + v9.AddArg2(lo, s) + v.AddArg2(v0, v9) return true } return false @@ -956,33 +886,24 @@ func rewriteValuedec64_OpLsh64x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v2.AddArg(hi) - v2.AddArg(s) - v1.AddArg(v2) + v2.AddArg2(hi, s) v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v3.AddArg(lo) v4 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v5.AuxInt = 32 - v4.AddArg(v5) - v4.AddArg(s) - v3.AddArg(v4) - v1.AddArg(v3) - v0.AddArg(v1) + v4.AddArg2(v5, s) + v3.AddArg2(lo, v4) + v1.AddArg2(v2, v3) v6 := 
b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v6.AddArg(lo) v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) - v7.AddArg(s) v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v8.AuxInt = 32 - v7.AddArg(v8) - v6.AddArg(v7) - v0.AddArg(v6) - v.AddArg(v0) + v7.AddArg2(s, v8) + v6.AddArg2(lo, v7) + v0.AddArg2(v1, v6) v9 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v9.AddArg(lo) - v9.AddArg(s) - v.AddArg(v9) + v9.AddArg2(lo, s) + v.AddArg2(v0, v9) return true } return false @@ -1027,8 +948,7 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool { } v.reset(OpLsh64x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh64x64 x (Int64Make hi lo)) @@ -1045,13 +965,11 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool { break } v.reset(OpLsh64x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1074,33 +992,24 @@ func rewriteValuedec64_OpLsh64x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v2.AddArg(hi) - v2.AddArg(s) - v1.AddArg(v2) + v2.AddArg2(hi, s) v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v3.AddArg(lo) v4 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v5.AuxInt = 32 - v4.AddArg(v5) - v4.AddArg(s) - v3.AddArg(v4) - v1.AddArg(v3) - v0.AddArg(v1) + v4.AddArg2(v5, s) + v3.AddArg2(lo, v4) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v6.AddArg(lo) v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) - v7.AddArg(s) v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v8.AuxInt = 32 - v7.AddArg(v8) - v6.AddArg(v7) - v0.AddArg(v6) - v.AddArg(v0) + v7.AddArg2(s, v8) + v6.AddArg2(lo, v7) + v0.AddArg2(v1, v6) v9 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v9.AddArg(lo) - v9.AddArg(s) - 
v.AddArg(v9) + v9.AddArg2(lo, s) + v.AddArg2(v0, v9) return true } return false @@ -1145,8 +1054,7 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool { } v.reset(OpLsh8x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Lsh8x64 x (Int64Make hi lo)) @@ -1163,13 +1071,11 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool { break } v.reset(OpLsh8x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1189,42 +1095,35 @@ func rewriteValuedec64_OpMul64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v2.AddArg(x) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v3.AddArg(y) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v7.AddArg(y) - v5.AddArg(v7) - v4.AddArg(v5) + v5.AddArg2(v6, v7) v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v10.AddArg(x) - v9.AddArg(v10) v11 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v11.AddArg(y) - v9.AddArg(v11) + v9.AddArg2(v10, v11) v8.AddArg(v9) - v4.AddArg(v8) - v0.AddArg(v4) - v.AddArg(v0) + v4.AddArg2(v5, v8) + v0.AddArg2(v1, v4) v12 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) v13 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) v14 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v14.AddArg(x) - v13.AddArg(v14) v15 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v15.AddArg(y) - v13.AddArg(v15) + v13.AddArg2(v14, v15) v12.AddArg(v13) - v.AddArg(v12) + v.AddArg2(v0, v12) 
return true } } @@ -1239,8 +1138,7 @@ func rewriteValuedec64_OpNeg64(v *Value) bool { v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1258,19 +1156,16 @@ func rewriteValuedec64_OpNeq64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -1288,19 +1183,16 @@ func rewriteValuedec64_OpOr64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -1344,8 +1236,7 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { } v.reset(OpRsh16Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh16Ux64 x (Int64Make hi lo)) @@ -1362,13 +1253,11 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1416,8 +1305,7 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool 
{ } v.reset(OpRsh16x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh16x64 x (Int64Make hi lo)) @@ -1434,13 +1322,11 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool { break } v.reset(OpRsh16x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1485,8 +1371,7 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { } v.reset(OpRsh32Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh32Ux64 x (Int64Make hi lo)) @@ -1503,13 +1388,11 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1555,8 +1438,7 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool { } v.reset(OpRsh32x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh32x64 x (Int64Make hi lo)) @@ -1573,13 +1455,11 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool { break } v.reset(OpRsh32x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1600,35 +1480,26 @@ func rewriteValuedec64_OpRsh64Ux16(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + 
v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v7.AddArg(hi) v8 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) - v8.AddArg(s) v9 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v9.AuxInt = 32 - v8.AddArg(v9) - v7.AddArg(v8) - v1.AddArg(v7) - v.AddArg(v1) + v8.AddArg2(s, v9) + v7.AddArg2(hi, v8) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1649,35 +1520,26 @@ func rewriteValuedec64_OpRsh64Ux32(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v7.AddArg(hi) v8 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) - v8.AddArg(s) v9 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v9.AuxInt = 32 - v8.AddArg(v9) - v7.AddArg(v8) - v1.AddArg(v7) - v.AddArg(v1) + v8.AddArg2(s, v9) + v7.AddArg2(hi, v8) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1722,8 +1584,7 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { } v.reset(OpRsh64Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh64Ux64 x 
(Int64Make hi lo)) @@ -1740,13 +1601,11 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -1767,35 +1626,26 @@ func rewriteValuedec64_OpRsh64Ux8(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v7.AddArg(hi) v8 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) - v8.AddArg(s) v9 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v9.AuxInt = 32 - v8.AddArg(v9) - v7.AddArg(v8) - v1.AddArg(v7) - v.AddArg(v1) + v8.AddArg2(s, v9) + v7.AddArg2(hi, v8) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1816,47 +1666,36 @@ func rewriteValuedec64_OpRsh64x16(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) v6 := 
b.NewValue0(v.Pos, OpConst16, typ.UInt16) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v8 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) - v8.AddArg(hi) v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) - v9.AddArg(s) v10 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v10.AuxInt = 32 - v9.AddArg(v10) - v8.AddArg(v9) - v7.AddArg(v8) + v9.AddArg2(s, v10) + v8.AddArg2(hi, v9) v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v12 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v13 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16) - v13.AddArg(s) v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v14.AuxInt = 5 - v13.AddArg(v14) + v13.AddArg2(s, v14) v12.AddArg(v13) v11.AddArg(v12) - v7.AddArg(v11) - v1.AddArg(v7) - v.AddArg(v1) + v7.AddArg2(v8, v11) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1877,45 +1716,34 @@ func rewriteValuedec64_OpRsh64x32(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) - v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v8 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) - v8.AddArg(hi) v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) - v9.AddArg(s) v10 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v10.AuxInt = 32 - v9.AddArg(v10) - v8.AddArg(v9) - v7.AddArg(v8) 
+ v9.AddArg2(s, v10) + v8.AddArg2(hi, v9) v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v12 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) - v12.AddArg(s) v13 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v13.AuxInt = 5 - v12.AddArg(v13) + v12.AddArg2(s, v13) v11.AddArg(v12) - v7.AddArg(v11) - v1.AddArg(v7) - v.AddArg(v1) + v7.AddArg2(v8, v11) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -1947,12 +1775,11 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v3.AddArg(x) v2.AddArg(v3) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) @@ -1970,8 +1797,7 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { } v.reset(OpRsh64x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh64x64 x (Int64Make hi lo)) @@ -1988,13 +1814,11 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { break } v.reset(OpRsh64x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -2015,47 +1839,36 @@ func rewriteValuedec64_OpRsh64x8(v *Value) bool { s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) - v0.AddArg(hi) - v0.AddArg(s) - v.AddArg(v0) + v0.AddArg2(hi, s) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) - v3.AddArg(lo) - v3.AddArg(s) - v2.AddArg(v3) + v3.AddArg2(lo, s) v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) - v4.AddArg(hi) v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v6.AuxInt = 32 - v5.AddArg(v6) - v5.AddArg(s) - v4.AddArg(v5) 
- v2.AddArg(v4) - v1.AddArg(v2) + v5.AddArg2(v6, s) + v4.AddArg2(hi, v5) + v2.AddArg2(v3, v4) v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v8 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) - v8.AddArg(hi) v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) - v9.AddArg(s) v10 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) v10.AuxInt = 32 - v9.AddArg(v10) - v8.AddArg(v9) - v7.AddArg(v8) + v9.AddArg2(s, v10) + v8.AddArg2(hi, v9) v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v12 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v13 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8) - v13.AddArg(s) v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v14.AuxInt = 5 - v13.AddArg(v14) + v13.AddArg2(s, v14) v12.AddArg(v13) v11.AddArg(v12) - v7.AddArg(v11) - v1.AddArg(v7) - v.AddArg(v1) + v7.AddArg2(v8, v11) + v1.AddArg2(v2, v7) + v.AddArg2(v0, v1) return true } return false @@ -2100,8 +1913,7 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { } v.reset(OpRsh8Ux32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh8Ux64 x (Int64Make hi lo)) @@ -2118,13 +1930,11 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -2172,8 +1982,7 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { } v.reset(OpRsh8x32) v.AuxInt = c - v.AddArg(x) - v.AddArg(lo) + v.AddArg2(x, lo) return true } // match: (Rsh8x64 x (Int64Make hi lo)) @@ -2190,13 +1999,11 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { break } v.reset(OpRsh8x32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) v1.AddArg(hi) - v0.AddArg(v1) - v0.AddArg(lo) - v.AddArg(v0) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) return true } return false @@ -2227,8 +2034,7 @@ func 
rewriteValuedec64_OpSignExt32to64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2273,14 +2079,10 @@ func rewriteValuedec64_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, hi.Type.PtrTo()) v0.AuxInt = 4 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(hi) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = lo.Type - v1.AddArg(dst) - v1.AddArg(lo) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, lo, mem) + v.AddArg3(v0, hi, v1) return true } // match: (Store {t} dst (Int64Make hi lo) mem) @@ -2303,14 +2105,10 @@ func rewriteValuedec64_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, lo.Type.PtrTo()) v0.AuxInt = 4 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(lo) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = hi.Type - v1.AddArg(dst) - v1.AddArg(hi) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(dst, hi, mem) + v.AddArg3(v0, lo, v1) return true } return false @@ -2329,31 +2127,26 @@ func rewriteValuedec64_OpSub64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(x) - v4.AddArg(v5) v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v6.AddArg(y) - v4.AddArg(v6) + v4.AddArg2(v5, v6) v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg3(v1, v2, v3) v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) v8 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v9.AddArg(x) - v8.AddArg(v9) v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v10.AddArg(y) - v8.AddArg(v10) + v8.AddArg2(v9, v10) 
v7.AddArg(v8) - v.AddArg(v7) + v.AddArg2(v0, v7) return true } } @@ -2417,19 +2210,16 @@ func rewriteValuedec64_OpXor64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) v5.AddArg(y) - v3.AddArg(v5) - v.AddArg(v3) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) return true } } @@ -2459,8 +2249,7 @@ func rewriteValuedec64_OpZeroExt32to64(v *Value) bool { v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go index 9a9e522554..eec3acfcda 100644 --- a/src/cmd/compile/internal/ssa/rewritedecArgs.go +++ b/src/cmd/compile/internal/ssa/rewritedecArgs.go @@ -28,11 +28,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Int) v1.AuxInt = off + config.PtrSize v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -48,15 +47,13 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo()) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Int) v1.AuxInt = off + config.PtrSize v1.Aux = n - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpArg, typ.Int) v2.AuxInt = off + 2*config.PtrSize v2.Aux = n - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } // match: (Arg {n} [off]) @@ -72,11 +69,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr) v0.AuxInt = off 
v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) v1.AuxInt = off + config.PtrSize v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -92,11 +88,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Float64) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Float64) v1.AuxInt = off + 8 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -112,11 +107,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, typ.Float32) v0.AuxInt = off v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, typ.Float32) v1.AuxInt = off + 4 v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg ) @@ -161,11 +155,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) v0.AuxInt = off + t.FieldOff(0) v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) v1.AuxInt = off + t.FieldOff(1) v1.Aux = n - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (Arg {n} [off]) @@ -182,15 +175,13 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) v0.AuxInt = off + t.FieldOff(0) v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) v1.AuxInt = off + t.FieldOff(1) v1.Aux = n - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) v2.AuxInt = off + t.FieldOff(2) v2.Aux = n - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } // match: (Arg {n} [off]) @@ -207,19 +198,16 @@ func rewriteValuedecArgs_OpArg(v *Value) bool { v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) v0.AuxInt = off + t.FieldOff(0) v0.Aux = n - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) v1.AuxInt = off + t.FieldOff(1) v1.Aux = n - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) v2.AuxInt = off + t.FieldOff(2) v2.Aux = n - v.AddArg(v2) v3 := 
b.NewValue0(v.Pos, OpArg, t.FieldType(3)) v3.AuxInt = off + t.FieldOff(3) v3.Aux = n - v.AddArg(v3) + v.AddArg4(v0, v1, v2, v3) return true } // match: (Arg ) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 94c2353fd9..5a9dd7ed5b 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -491,11 +491,9 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { } z := v_1_1 v.reset(OpMul16) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -554,11 +552,9 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { continue } v.reset(OpAdd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -583,11 +579,9 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { continue } v.reset(OpAdd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -613,10 +607,8 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { } v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -645,8 +637,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -673,8 +664,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -701,8 +691,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = 
int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -757,11 +746,9 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { } z := v_1_1 v.reset(OpMul32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -820,11 +807,9 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { continue } v.reset(OpAdd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -849,11 +834,9 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { continue } v.reset(OpAdd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -879,10 +862,8 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { } v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -911,8 +892,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -939,8 +919,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -967,8 +946,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1046,11 +1024,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } z := v_1_1 v.reset(OpMul64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) 
return true } } @@ -1109,11 +1085,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { continue } v.reset(OpAdd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1138,11 +1112,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { continue } v.reset(OpAdd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -1168,10 +1140,8 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -1200,8 +1170,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1228,8 +1197,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1256,8 +1224,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1335,11 +1302,9 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } z := v_1_1 v.reset(OpMul8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -1398,11 +1363,9 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { continue } v.reset(OpAdd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1427,11 +1390,9 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { continue } 
v.reset(OpAdd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } break @@ -1457,10 +1418,8 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } break @@ -1489,8 +1448,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1517,8 +1475,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1545,8 +1502,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } break @@ -1721,8 +1677,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { } y := v_1_1 v.reset(OpAnd16) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -1751,11 +1706,9 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { continue } v.reset(OpAnd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1785,8 +1738,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.reset(OpAnd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -1927,8 +1879,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { } y := v_1_1 v.reset(OpAnd32) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -1957,11 +1908,9 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { continue } v.reset(OpAnd32) - v.AddArg(i) 
v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -1991,8 +1940,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.reset(OpAnd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2133,8 +2081,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { } y := v_1_1 v.reset(OpAnd64) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -2163,11 +2110,9 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { continue } v.reset(OpAnd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -2197,8 +2142,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.reset(OpAnd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2339,8 +2283,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { } y := v_1_1 v.reset(OpAnd8) - v.AddArg(x) - v.AddArg(y) + v.AddArg2(x, y) return true } } @@ -2369,11 +2312,9 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { continue } v.reset(OpAnd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -2403,8 +2344,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.reset(OpAnd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -2635,9 +2575,8 @@ func rewriteValuegeneric_OpConstInterface(v *Value) bool { for { v.reset(OpIMake) v0 := b.NewValue0(v.Pos, OpConstNil, typ.Uintptr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) - v.AddArg(v1) + v.AddArg2(v0, v1) return true } } @@ -2654,13 +2593,11 @@ func rewriteValuegeneric_OpConstSlice(v *Value) bool { } v.reset(OpSliceMake) v0 := 
b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo()) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst32, typ.Int) v2.AuxInt = 0 - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } // match: (ConstSlice) @@ -2672,13 +2609,11 @@ func rewriteValuegeneric_OpConstSlice(v *Value) bool { } v.reset(OpSliceMake) v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo()) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.Int) v2.AuxInt = 0 - v.AddArg(v2) + v.AddArg3(v0, v1, v2) return true } return false @@ -2698,10 +2633,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { } v.reset(OpStringMake) v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (ConstString {s}) @@ -2714,10 +2648,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { } v.reset(OpStringMake) v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) v1.AuxInt = 0 - v.AddArg(v1) + v.AddArg2(v0, v1) return true } // match: (ConstString {s}) @@ -2733,10 +2666,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { v0.Aux = fe.StringData(s.(string)) v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpConst32, typ.Int) v2.AuxInt = int64(len(s.(string))) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (ConstString {s}) @@ -2752,10 +2684,9 @@ func rewriteValuegeneric_OpConstString(v *Value) bool { v0.Aux = fe.StringData(s.(string)) v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpConst64, typ.Int) v2.AuxInt = int64(len(s.(string))) - v.AddArg(v2) + v.AddArg2(v0, v2) return true } return false @@ -2783,8 +2714,7 @@ func rewriteValuegeneric_OpConvert(v 
*Value) bool { continue } v.reset(OpAdd64) - v.AddArg(ptr) - v.AddArg(off) + v.AddArg2(ptr, off) return true } break @@ -2809,8 +2739,7 @@ func rewriteValuegeneric_OpConvert(v *Value) bool { continue } v.reset(OpAdd32) - v.AddArg(ptr) - v.AddArg(off) + v.AddArg2(ptr, off) return true } break @@ -3035,10 +2964,9 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div16 n (Const16 [c])) @@ -3056,10 +2984,9 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { } v.reset(OpNeg16) v0 := b.NewValue0(v.Pos, OpDiv16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst16, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -3073,14 +3000,12 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { } v.reset(OpRsh16Ux64) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg16, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 15 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div16 n (Const16 [c])) @@ -3098,22 +3023,18 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { } v.reset(OpRsh16x64) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh16x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 15 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div16 x (Const16 [c])) @@ -3135,23 +3056,19 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { v1 := b.NewValue0(v.Pos, 
OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(smagic(16, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 + smagic(16, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh32x64, t) v6 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v7.AuxInt = 31 - v5.AddArg(v7) - v.AddArg(v5) + v5.AddArg2(v6, v7) + v.AddArg2(v0, v5) return true } return false @@ -3194,10 +3111,9 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div16u x (Const16 [c])) @@ -3217,14 +3133,12 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(1<<16 + umagic(16, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 + umagic(16, c).s - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -3245,14 +3159,12 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(1<<15 + umagic(16, c).m/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 + umagic(16, c).s - 1 - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -3273,19 +3185,16 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { 
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(1<<15 + (umagic(16, c).m+1)/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 1 - v3.AddArg(v5) - v1.AddArg(v3) - v0.AddArg(v1) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 16 + umagic(16, c).s - 2 - v0.AddArg(v6) + v0.AddArg2(v1, v6) v.AddArg(v0) return true } @@ -3307,23 +3216,19 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32) v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 16 - v2.AddArg(v4) - v1.AddArg(v2) + v2.AddArg2(v3, v4) v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v6.AuxInt = int64(umagic(16, c).m) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v7.AddArg(x) - v5.AddArg(v7) - v1.AddArg(v5) - v0.AddArg(v1) + v5.AddArg2(v6, v7) + v1.AddArg2(v2, v5) v8 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v8.AuxInt = 16 + umagic(16, c).s - 1 - v0.AddArg(v8) + v0.AddArg2(v1, v8) v.AddArg(v0) return true } @@ -3367,10 +3272,9 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffffffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div32 n (Const32 [c])) @@ -3388,10 +3292,9 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { } v.reset(OpNeg32) v0 := b.NewValue0(v.Pos, OpDiv32, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst32, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -3405,14 +3308,12 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { } 
v.reset(OpRsh32Ux64) v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg32, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 31 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div32 n (Const32 [c])) @@ -3430,22 +3331,18 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { } v.reset(OpRsh32x64) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh32x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 31 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div32 x (Const32 [c])) @@ -3467,23 +3364,19 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(smagic(32, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 + smagic(32, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh64x64, t) v6 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v7.AuxInt = 63 - v5.AddArg(v7) - v.AddArg(v5) + v5.AddArg2(v6, v7) + v.AddArg2(v0, v5) return true } // match: (Div32 x (Const32 [c])) @@ -3505,19 +3398,15 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v1 := b.NewValue0(v.Pos, OpHmul32, t) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(int32(smagic(32, c).m / 2)) - v1.AddArg(v2) - 
v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = smagic(32, c).s - 1 - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpRsh32x64, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 31 - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } // match: (Div32 x (Const32 [c])) @@ -3540,21 +3429,16 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v2 := b.NewValue0(v.Pos, OpHmul32, t) v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v3.AuxInt = int64(int32(smagic(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v2.AddArg2(v3, x) + v1.AddArg2(v2, x) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = smagic(32, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh32x64, t) - v5.AddArg(x) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 31 - v5.AddArg(v6) - v.AddArg(v5) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) return true } return false @@ -3592,10 +3476,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { break } v.reset(OpMul32F) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst32F, t) v0.AuxInt = auxFrom32F(1 / auxTo32F(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -3638,10 +3521,9 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xffffffff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div32u x (Const32 [c])) @@ -3661,12 +3543,10 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(1<<31 + umagic(32, c).m/2)) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(v1, x) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = umagic(32, c).s - 1 - 
v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div32u x (Const32 [c])) @@ -3686,17 +3566,14 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v1.AuxInt = int64(int32(1<<31 + (umagic(32, c).m+1)/2)) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 1 - v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v2.AddArg2(x, v3) + v0.AddArg2(v1, v2) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = umagic(32, c).s - 2 - v.AddArg(v4) + v.AddArg2(v0, v4) return true } // match: (Div32u x (Const32 [c])) @@ -3714,17 +3591,14 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v.reset(OpRsh32Ux64) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(int32(umagic(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, x) + v0.AddArg2(x, v1) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = umagic(32, c).s - 1 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } // match: (Div32u x (Const32 [c])) @@ -3744,14 +3618,12 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(1<<31 + umagic(32, c).m/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 + umagic(32, c).s - 1 - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -3772,19 +3644,16 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(1<<31 + (umagic(32, 
c).m+1)/2) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(x) - v3.AddArg(v4) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 1 - v3.AddArg(v5) - v1.AddArg(v3) - v0.AddArg(v1) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v6.AuxInt = 32 + umagic(32, c).s - 2 - v0.AddArg(v6) + v0.AddArg2(v1, v6) v.AddArg(v0) return true } @@ -3806,23 +3675,19 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(x) - v2.AddArg(v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 32 - v2.AddArg(v4) - v1.AddArg(v2) + v2.AddArg2(v3, v4) v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32) v6.AuxInt = int64(umagic(32, c).m) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v7.AddArg(x) - v5.AddArg(v7) - v1.AddArg(v5) - v0.AddArg(v1) + v5.AddArg2(v6, v7) + v1.AddArg2(v2, v5) v8 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v8.AuxInt = 32 + umagic(32, c).s - 1 - v0.AddArg(v8) + v0.AddArg2(v1, v8) v.AddArg(v0) return true } @@ -3866,10 +3731,9 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div64 n (Const64 [-1<<63])) @@ -3899,10 +3763,9 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { } v.reset(OpNeg64) v0 := b.NewValue0(v.Pos, OpDiv64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -3916,14 +3779,12 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { } v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg64, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + 
v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 63 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div64 n (Const64 [c])) @@ -3941,22 +3802,18 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { } v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh64x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 63 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 64 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div64 x (Const64 [c])) @@ -3978,19 +3835,15 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpHmul64, t) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(smagic(64, c).m / 2) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = smagic(64, c).s - 1 - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpRsh64x64, t) - v4.AddArg(x) v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v5.AuxInt = 63 - v4.AddArg(v5) - v.AddArg(v4) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) return true } // match: (Div64 x (Const64 [c])) @@ -4013,21 +3866,16 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpHmul64, t) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = int64(smagic(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v2.AddArg2(v3, x) + v1.AddArg2(v2, x) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = smagic(64, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh64x64, t) - v5.AddArg(x) v6 := b.NewValue0(v.Pos, 
OpConst64, typ.UInt64) v6.AuxInt = 63 - v5.AddArg(v6) - v.AddArg(v5) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) return true } return false @@ -4065,10 +3913,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { break } v.reset(OpMul64F) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64F, t) v0.AuxInt = auxFrom64F(1 / auxTo64F(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -4111,10 +3958,9 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div64u n (Const64 [-1<<63])) @@ -4125,10 +3971,9 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = 63 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div64u x (Const64 [c])) @@ -4148,12 +3993,10 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = int64(1<<63 + umagic(64, c).m/2) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(v1, x) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = umagic(64, c).s - 1 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div64u x (Const64 [c])) @@ -4173,17 +4016,14 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = int64(1<<63 + (umagic(64, c).m+1)/2) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v2.AddArg(x) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 1 - v2.AddArg(v3) - v0.AddArg(v2) - v.AddArg(v0) + v2.AddArg2(x, v3) + v0.AddArg2(v1, v2) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = umagic(64, c).s - 2 - v.AddArg(v4) + v.AddArg2(v0, v4) return true } // match: (Div64u x (Const64 [c])) @@ -4201,17 
+4041,14 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v.reset(OpRsh64Ux64) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = int64(umagic(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, x) + v0.AddArg2(x, v1) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = umagic(64, c).s - 1 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } return false @@ -4253,10 +4090,9 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div8 n (Const8 [c])) @@ -4274,10 +4110,9 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { } v.reset(OpNeg8) v0 := b.NewValue0(v.Pos, OpDiv8, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst8, t) v1.AuxInt = -c - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -4291,14 +4126,12 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { } v.reset(OpRsh8Ux64) v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpNeg8, t) v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v2.AuxInt = 7 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Div8 n (Const8 [c])) @@ -4316,22 +4149,18 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { } v.reset(OpRsh8x64) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t) v2 := b.NewValue0(v.Pos, OpRsh8x64, t) - v2.AddArg(n) v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v3.AuxInt = 7 - v2.AddArg(v3) - v1.AddArg(v2) + v2.AddArg2(n, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 8 - log2(c) - v1.AddArg(v4) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) v5 := b.NewValue0(v.Pos, 
OpConst64, typ.UInt64) v5.AuxInt = log2(c) - v.AddArg(v5) + v.AddArg2(v0, v5) return true } // match: (Div8 x (Const8 [c])) @@ -4353,23 +4182,19 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(smagic(8, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 8 + smagic(8, c).s - v0.AddArg(v4) - v.AddArg(v0) + v0.AddArg2(v1, v4) v5 := b.NewValue0(v.Pos, OpRsh32x64, t) v6 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v6.AddArg(x) - v5.AddArg(v6) v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v7.AuxInt = 31 - v5.AddArg(v7) - v.AddArg(v5) + v5.AddArg2(v6, v7) + v.AddArg2(v0, v5) return true } return false @@ -4411,10 +4236,9 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c & 0xff) - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Div8u x (Const8 [c])) @@ -4434,14 +4258,12 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = int64(1<<8 + umagic(8, c).m) - v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v3.AddArg(x) - v1.AddArg(v3) - v0.AddArg(v1) + v1.AddArg2(v2, v3) v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v4.AuxInt = 8 + umagic(8, c).s - v0.AddArg(v4) + v0.AddArg2(v1, v4) v.AddArg(v0) return true } @@ -4488,8 +4310,7 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v.reset(OpEq16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -4535,14 +4356,12 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) v1 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v2.AuxInt = c & 0xffff - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } break @@ -4569,14 +4388,12 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v1.AddArg(x) - v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) v2.AuxInt = c - v0.AddArg(v2) - v.AddArg(v0) + v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) v3.AuxInt = 0 - v.AddArg(v3) + v.AddArg2(v0, v3) return true } break @@ -4634,16 +4451,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4703,16 +4517,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4781,16 +4592,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := 
b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4868,16 +4676,13 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) + v1.AddArg2(v2, x) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) + v.AddArg2(v0, v4) return true } } @@ -4949,20 +4754,16 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) + v2.AddArg2(v3, x) v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) + v1.AddArg2(v2, v4) v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) + v0.AddArg2(v1, v5) v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) + v.AddArg2(v0, v6) return true } } @@ -5025,14 +4826,12 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { } v.reset(OpEq16) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst16, t) v1.AuxInt = int64(1< op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ 
mem:(Zero [n] p4 _)))) @@ -9463,8 +9178,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p4) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _))))) @@ -9515,8 +9229,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p5) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _)))))) @@ -9574,8 +9287,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p6) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) @@ -9765,8 +9477,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) v.AddArg(v0) return true } @@ -9785,16 +9496,13 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v3.AuxInt = t.FieldOff(1) v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(v3, mem) + v.AddArg2(v0, v2) return true } // match: (Load ptr mem) @@ -9812,23 +9520,18 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) v3 := 
b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v3.AuxInt = t.FieldOff(1) v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(v3, mem) v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v5.AuxInt = t.FieldOff(2) v5.AddArg(ptr) - v4.AddArg(v5) - v4.AddArg(mem) - v.AddArg(v4) + v4.AddArg2(v5, mem) + v.AddArg3(v0, v2, v4) return true } // match: (Load ptr mem) @@ -9846,30 +9549,23 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v1.AuxInt = 0 v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg2(v1, mem) v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v3.AuxInt = t.FieldOff(1) v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) + v2.AddArg2(v3, mem) v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v5.AuxInt = t.FieldOff(2) v5.AddArg(ptr) - v4.AddArg(v5) - v4.AddArg(mem) - v.AddArg(v4) + v4.AddArg2(v5, mem) v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3)) v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) v7.AuxInt = t.FieldOff(3) v7.AddArg(ptr) - v6.AddArg(v7) - v6.AddArg(mem) - v.AddArg(v6) + v6.AddArg2(v7, mem) + v.AddArg4(v0, v2, v4, v6) return true } // match: (Load _ _) @@ -9895,8 +9591,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { } v.reset(OpArrayMake1) v0 := b.NewValue0(v.Pos, OpLoad, t.Elem()) - v0.AddArg(ptr) - v0.AddArg(mem) + v0.AddArg2(ptr, mem) v.AddArg(v0) return true } @@ -9916,10 +9611,9 @@ func rewriteValuegeneric_OpLsh16x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x16 (Const16 [0]) _) @@ -9948,10 +9642,9 @@ func rewriteValuegeneric_OpLsh16x32(v *Value) bool { } c := v_1.AuxInt 
v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x32 (Const16 [0]) _) @@ -10046,10 +9739,9 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { break } v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10084,10 +9776,9 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { break } v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10106,10 +9797,9 @@ func rewriteValuegeneric_OpLsh16x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh16x8 (Const16 [0]) _) @@ -10138,10 +9828,9 @@ func rewriteValuegeneric_OpLsh32x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x16 (Const32 [0]) _) @@ -10170,10 +9859,9 @@ func rewriteValuegeneric_OpLsh32x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x32 (Const32 [0]) _) @@ -10268,10 +9956,9 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { break } v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10306,10 +9993,9 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { break } 
v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10328,10 +10014,9 @@ func rewriteValuegeneric_OpLsh32x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh32x8 (Const32 [0]) _) @@ -10360,10 +10045,9 @@ func rewriteValuegeneric_OpLsh64x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x16 (Const64 [0]) _) @@ -10392,10 +10076,9 @@ func rewriteValuegeneric_OpLsh64x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x32 (Const64 [0]) _) @@ -10490,10 +10173,9 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { break } v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10528,10 +10210,9 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { break } v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10550,10 +10231,9 @@ func rewriteValuegeneric_OpLsh64x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh64x8 (Const64 [0]) _) @@ -10582,10 +10262,9 @@ func rewriteValuegeneric_OpLsh8x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh8x64) - v.AddArg(x) v0 := 
b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x16 (Const8 [0]) _) @@ -10614,10 +10293,9 @@ func rewriteValuegeneric_OpLsh8x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x32 (Const8 [0]) _) @@ -10712,10 +10390,9 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { break } v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -10750,10 +10427,9 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { break } v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -10772,10 +10448,9 @@ func rewriteValuegeneric_OpLsh8x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpLsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Lsh8x8 (Const8 [0]) _) @@ -10827,10 +10502,9 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { break } v.reset(OpAnd16) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = (c & 0xffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod16 n (Const16 [c])) @@ -10848,10 +10522,9 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { } v.reset(OpMod16) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod16 x (Const16 [c])) @@ -10868,18 +10541,15 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { break } v.reset(OpSub16) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul16, t) v1 := b.NewValue0(v.Pos, OpDiv16, t) - v1.AddArg(x) v2 := 
b.NewValue0(v.Pos, OpConst16, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst16, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -10921,10 +10591,9 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { break } v.reset(OpAnd16) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = (c & 0xffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod16u x (Const16 [c])) @@ -10941,18 +10610,15 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { break } v.reset(OpSub16) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul16, t) v1 := b.NewValue0(v.Pos, OpDiv16u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst16, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst16, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -10994,10 +10660,9 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { break } v.reset(OpAnd32) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = (c & 0xffffffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod32 n (Const32 [c])) @@ -11015,10 +10680,9 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { } v.reset(OpMod32) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod32 x (Const32 [c])) @@ -11035,18 +10699,15 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { break } v.reset(OpSub32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul32, t) v1 := b.NewValue0(v.Pos, OpDiv32, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst32, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11088,10 +10749,9 @@ func 
rewriteValuegeneric_OpMod32u(v *Value) bool { break } v.reset(OpAnd32) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = (c & 0xffffffff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod32u x (Const32 [c])) @@ -11108,18 +10768,15 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { break } v.reset(OpSub32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul32, t) v1 := b.NewValue0(v.Pos, OpDiv32u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst32, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11161,10 +10818,9 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { break } v.reset(OpAnd64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64 n (Const64 [-1<<63])) @@ -11195,10 +10851,9 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { } v.reset(OpMod64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64 x (Const64 [c])) @@ -11215,18 +10870,15 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { break } v.reset(OpSub64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul64, t) v1 := b.NewValue0(v.Pos, OpDiv64, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11268,10 +10920,9 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } v.reset(OpAnd64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64u n (Const64 [-1<<63])) @@ -11283,10 +10934,9 @@ func rewriteValuegeneric_OpMod64u(v 
*Value) bool { break } v.reset(OpAnd64) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 1<<63 - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod64u x (Const64 [c])) @@ -11303,18 +10953,15 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } v.reset(OpSub64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul64, t) v1 := b.NewValue0(v.Pos, OpDiv64u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst64, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11356,10 +11003,9 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { break } v.reset(OpAnd8) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = (c & 0xff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod8 n (Const8 [c])) @@ -11377,10 +11023,9 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { } v.reset(OpMod8) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = -c - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod8 x (Const8 [c])) @@ -11397,18 +11042,15 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { break } v.reset(OpSub8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMul8, t) v1 := b.NewValue0(v.Pos, OpDiv8, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst8, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst8, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11450,10 +11092,9 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { break } v.reset(OpAnd8) - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = (c & 0xff) - 1 - v.AddArg(v0) + v.AddArg2(n, v0) return true } // match: (Mod8u x (Const8 [c])) @@ -11470,18 +11111,15 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { break } v.reset(OpSub8) - v.AddArg(x) v0 := 
b.NewValue0(v.Pos, OpMul8, t) v1 := b.NewValue0(v.Pos, OpDiv8u, t) - v1.AddArg(x) v2 := b.NewValue0(v.Pos, OpConst8, t) v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) + v1.AddArg2(x, v2) v3 := b.NewValue0(v.Pos, OpConst8, t) v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) return true } return false @@ -11512,8 +11150,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(mem) + v.AddArg2(dst1, mem) return true } // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) @@ -11540,8 +11177,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(mem) + v.AddArg2(dst1, mem) return true } // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) @@ -11570,9 +11206,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t1 - v.AddArg(dst1) - v.AddArg(src1) - v.AddArg(mem) + v.AddArg3(dst1, src1, mem) return true } // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) @@ -11595,9 +11229,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) - v.AddArg(mem) + v.AddArg3(dst1, src1, mem) return true } // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) @@ -11625,12 +11257,10 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg3(dst1, src1, v0) return true } // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) @@ -11653,9 +11283,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) - v.AddArg(mem) + v.AddArg3(dst1, src1, mem) return true } // match: (Move {t} [n] dst1 src1 
vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) @@ -11683,12 +11311,10 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(src1) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg3(dst1, src1, v0) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))) @@ -11737,17 +11363,13 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = 0 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(v2, d2, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))) @@ -11810,25 +11432,19 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = 0 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(v4, d3, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))) @@ -11905,33 +11521,25 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - 
v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = 0 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(v6, d4, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) @@ -11984,17 +11592,13 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = 0 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(v2, d2, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))) @@ -12061,25 +11665,19 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = 0 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(v4, d3, mem) + v1.AddArg3(v2, 
d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) @@ -12160,33 +11758,25 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = 0 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(v6, d4, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _))) @@ -12226,14 +11816,11 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = n v1.Aux = t1 - v1.AddArg(dst) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(dst, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _)))) @@ -12287,22 +11874,17 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) 
v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v3.AuxInt = n v3.Aux = t1 - v3.AddArg(dst) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg2(dst, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _))))) @@ -12370,30 +11952,23 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v5.AuxInt = n v5.Aux = t1 - v5.AddArg(dst) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg2(dst, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _)))))) @@ -12475,38 +12050,29 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = o5 v6.AddArg(dst) - v5.AddArg(v6) - 
v5.AddArg(d4) v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v7.AuxInt = n v7.Aux = t1 - v7.AddArg(dst) - v7.AddArg(mem) - v5.AddArg(v7) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v7.AddArg2(dst, mem) + v5.AddArg3(v6, d4, v7) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _)))) @@ -12550,14 +12116,11 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = n v1.Aux = t1 - v1.AddArg(dst) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg2(dst, mem) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _))))) @@ -12615,22 +12178,17 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v3.AuxInt = n v3.Aux = t1 - v3.AddArg(dst) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg2(dst, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _)))))) @@ -12702,30 +12260,23 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := 
b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v5.AuxInt = n v5.Aux = t1 - v5.AddArg(dst) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg2(dst, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _))))))) @@ -12811,38 +12362,29 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = o5 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(d4) v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v7.AuxInt = n v7.Aux = t1 - v7.AddArg(dst) - v7.AddArg(mem) - v5.AddArg(v7) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v7.AddArg2(dst, mem) + v5.AddArg3(v6, d4, v7) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) return true } // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _)) @@ -12867,9 +12409,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = s v.Aux = t1 - v.AddArg(dst) - v.AddArg(src) - v.AddArg(midmem) + v.AddArg3(dst, src, midmem) return true } // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _))) @@ -12898,9 +12438,7 @@ func 
rewriteValuegeneric_OpMove(v *Value) bool { v.reset(OpMove) v.AuxInt = s v.Aux = t1 - v.AddArg(dst) - v.AddArg(src) - v.AddArg(midmem) + v.AddArg3(dst, src, midmem) return true } // match: (Move dst src mem) @@ -12988,10 +12526,9 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } v.reset(OpLsh16x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13012,10 +12549,9 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } v.reset(OpNeg16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13058,8 +12594,7 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.reset(OpMul16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13135,10 +12670,9 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } v.reset(OpLsh32x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13159,10 +12693,9 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } v.reset(OpNeg32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13192,13 +12725,11 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMul32, t) v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(v2, x) + v.AddArg2(v0, v1) return true } } @@ -13241,8 +12772,7 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.reset(OpMul32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = 
int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13309,8 +12839,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { continue } v.reset(OpAdd32F) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -13385,10 +12914,9 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } v.reset(OpLsh64x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13409,10 +12937,9 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } v.reset(OpNeg64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13442,13 +12969,11 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c * d - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpMul64, t) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) + v1.AddArg2(v2, x) + v.AddArg2(v0, v1) return true } } @@ -13491,8 +13016,7 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.reset(OpMul64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c * d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13559,8 +13083,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { continue } v.reset(OpAdd64F) - v.AddArg(x) - v.AddArg(x) + v.AddArg2(x, x) return true } break @@ -13635,10 +13158,9 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } v.reset(OpLsh8x64) v.Type = t - v.AddArg(n) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = log2(c) - v.AddArg(v0) + v.AddArg2(n, v0) return true } break @@ -13659,10 +13181,9 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } v.reset(OpNeg8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v1.AuxInt = log2(-c) - v0.AddArg(v1) + 
v0.AddArg2(n, v1) v.AddArg(v0) return true } @@ -13705,8 +13226,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.reset(OpMul8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -13737,8 +13257,7 @@ func rewriteValuegeneric_OpNeg16(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub16) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg16 (Neg16 x)) @@ -13764,8 +13283,7 @@ func rewriteValuegeneric_OpNeg16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13793,8 +13311,7 @@ func rewriteValuegeneric_OpNeg32(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub32) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg32 (Neg32 x)) @@ -13820,8 +13337,7 @@ func rewriteValuegeneric_OpNeg32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13868,8 +13384,7 @@ func rewriteValuegeneric_OpNeg64(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub64) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg64 (Neg64 x)) @@ -13895,8 +13410,7 @@ func rewriteValuegeneric_OpNeg64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -13943,8 +13457,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] v.reset(OpSub8) - v.AddArg(y) - v.AddArg(x) + v.AddArg2(y, x) return true } // match: (Neg8 (Neg8 x)) @@ -13970,8 +13483,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -14016,8 
+13528,7 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { v.reset(OpNeq16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -14097,14 +13608,12 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { } v.reset(OpNeq16) v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) v1 := b.NewValue0(v.Pos, OpConst16, t) v1.AuxInt = int64(1< ptr idx) @@ -16695,13 +16126,11 @@ func rewriteValuegeneric_OpPtrIndex(v *Value) bool { break } v.reset(OpAddPtr) - v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMul64, typ.Int) - v0.AddArg(idx) v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) v1.AuxInt = t.Elem().Size() - v0.AddArg(v1) - v.AddArg(v0) + v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) return true } return false @@ -16840,10 +16269,9 @@ func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux16 (Const16 [0]) _) @@ -16872,10 +16300,9 @@ func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux32 (Const16 [0]) _) @@ -16970,10 +16397,9 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 [15])) @@ -16992,10 +16418,9 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 15 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -17030,10 +16455,9 @@ func 
rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { break } v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) @@ -17070,10 +16494,9 @@ func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16Ux8 (Const16 [0]) _) @@ -17102,10 +16525,9 @@ func rewriteValuegeneric_OpRsh16x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x16 (Const16 [0]) _) @@ -17134,10 +16556,9 @@ func rewriteValuegeneric_OpRsh16x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x32 (Const16 [0]) _) @@ -17217,10 +16638,9 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { break } v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) @@ -17257,10 +16677,9 @@ func rewriteValuegeneric_OpRsh16x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh16x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh16x8 (Const16 [0]) _) @@ -17289,10 +16708,9 @@ func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux16 (Const32 [0]) _) @@ -17321,10 +16739,9 
@@ func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux32 (Const32 [0]) _) @@ -17419,10 +16836,9 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 [31])) @@ -17441,10 +16857,9 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 31 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -17479,10 +16894,9 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) @@ -17537,10 +16951,9 @@ func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32Ux8 (Const32 [0]) _) @@ -17569,10 +16982,9 @@ func rewriteValuegeneric_OpRsh32x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 (Const32 [0]) _) @@ -17601,10 +17013,9 @@ func rewriteValuegeneric_OpRsh32x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 
(Const32 [0]) _) @@ -17684,10 +17095,9 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { break } v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) @@ -17742,10 +17152,9 @@ func rewriteValuegeneric_OpRsh32x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh32x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 (Const32 [0]) _) @@ -17774,10 +17183,9 @@ func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux16 (Const64 [0]) _) @@ -17806,10 +17214,9 @@ func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux32 (Const64 [0]) _) @@ -17904,10 +17311,9 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 [63])) @@ -17926,10 +17332,9 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 63 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -17964,10 +17369,9 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { break } v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // 
match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) @@ -18040,10 +17444,9 @@ func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64Ux8 (Const64 [0]) _) @@ -18072,10 +17475,9 @@ func rewriteValuegeneric_OpRsh64x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x16 (Const64 [0]) _) @@ -18104,10 +17506,9 @@ func rewriteValuegeneric_OpRsh64x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x32 (Const64 [0]) _) @@ -18187,10 +17588,9 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { break } v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) @@ -18263,10 +17663,9 @@ func rewriteValuegeneric_OpRsh64x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh64x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh64x8 (Const64 [0]) _) @@ -18295,10 +17694,9 @@ func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux16 (Const8 [0]) _) @@ -18327,10 +17725,9 @@ func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) 
return true } // match: (Rsh8Ux32 (Const8 [0]) _) @@ -18425,10 +17822,9 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7] )) @@ -18447,10 +17843,9 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 7 - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) @@ -18485,10 +17880,9 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { break } v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -18507,10 +17901,9 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8Ux64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8Ux8 (Const8 [0]) _) @@ -18539,10 +17932,9 @@ func rewriteValuegeneric_OpRsh8x16(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8x16 (Const8 [0]) _) @@ -18571,10 +17963,9 @@ func rewriteValuegeneric_OpRsh8x32(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8x32 (Const8 [0]) _) @@ -18653,10 +18044,9 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { break } v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) + v.AddArg2(x, v0) return true } return false @@ -18675,10 +18065,9 @@ func 
rewriteValuegeneric_OpRsh8x8(v *Value) bool { } c := v_1.AuxInt v.reset(OpRsh8x64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) - v.AddArg(v0) + v.AddArg2(x, v0) return true } // match: (Rsh8x8 (Const8 [0]) _) @@ -18708,8 +18097,7 @@ func rewriteValuegeneric_OpSelect0(v *Value) bool { } lo := v_0.Args[1] v.reset(OpDiv64u) - v.AddArg(lo) - v.AddArg(y) + v.AddArg2(lo, y) return true } return false @@ -18729,8 +18117,7 @@ func rewriteValuegeneric_OpSelect1(v *Value) bool { } lo := v_0.Args[1] v.reset(OpMod64u) - v.AddArg(lo) - v.AddArg(y) + v.AddArg2(lo, y) return true } return false @@ -19230,9 +18617,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { v.reset(OpMove) v.AuxInt = sz v.Aux = t.(*types.Type).Elem() - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) @@ -19269,9 +18654,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { v.reset(OpMove) v.AuxInt = sz v.Aux = t.(*types.Type).Elem() - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (StaticCall {sym} x) @@ -19595,9 +18978,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v0.AuxInt = 0 v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f0) - v.AddArg(mem) + v.AddArg3(v0, f0, mem) return true } // match: (Store dst (StructMake2 f0 f1) mem) @@ -19616,17 +18997,13 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v0.AuxInt = t.FieldOff(1) v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t.FieldType(0) v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v2.AuxInt = 0 v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(f0) - v1.AddArg(mem) - v.AddArg(v1) + v1.AddArg3(v2, f0, mem) + v.AddArg3(v0, f1, v1) return true } 
// match: (Store dst (StructMake3 f0 f1 f2) mem) @@ -19646,25 +19023,19 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v0.AuxInt = t.FieldOff(2) v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t.FieldType(1) v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v2.AuxInt = t.FieldOff(1) v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(f1) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t.FieldType(0) v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v4.AuxInt = 0 v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(f0) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v3.AddArg3(v4, f0, mem) + v1.AddArg3(v2, f1, v3) + v.AddArg3(v0, f2, v1) return true } // match: (Store dst (StructMake4 f0 f1 f2 f3) mem) @@ -19685,33 +19056,25 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) v0.AuxInt = t.FieldOff(3) v0.AddArg(dst) - v.AddArg(v0) - v.AddArg(f3) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t.FieldType(2) v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) v2.AuxInt = t.FieldOff(2) v2.AddArg(dst) - v1.AddArg(v2) - v1.AddArg(f2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t.FieldType(1) v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) v4.AuxInt = t.FieldOff(1) v4.AddArg(dst) - v3.AddArg(v4) - v3.AddArg(f1) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t.FieldType(0) v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) v6.AuxInt = 0 v6.AddArg(dst) - v5.AddArg(v6) - v5.AddArg(f0) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v5.AddArg3(v6, f0, mem) + v3.AddArg3(v4, f1, v5) + v1.AddArg3(v2, f2, v3) + v.AddArg3(v0, f3, v1) return true } // match: (Store {t} dst (Load src mem) mem) @@ -19731,9 +19094,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.reset(OpMove) v.AuxInt = sizeof(t) v.Aux = t - 
v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) + v.AddArg3(dst, src, mem) return true } // match: (Store {t} dst (Load src mem) (VarDef {x} mem)) @@ -19757,12 +19118,10 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.reset(OpMove) v.AuxInt = sizeof(t) v.Aux = t - v.AddArg(dst) - v.AddArg(src) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg3(dst, src, v0) return true } // match: (Store _ (ArrayMake0) mem) @@ -19788,9 +19147,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { mem := v_2 v.reset(OpStore) v.Aux = e.Type - v.AddArg(dst) - v.AddArg(e) - v.AddArg(mem) + v.AddArg3(dst, e, mem) return true } // match: (Store (Load (OffPtr [c] (SP)) mem) x mem) @@ -19885,14 +19242,10 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(op2, d2, mem) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem)))) @@ -19944,19 +19297,13 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg3(op3, d3, mem) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem))))) @@ -20021,24 +19368,16 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := 
b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v2.Aux = t4 - v2.AddArg(op4) - v2.AddArg(d4) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg3(op4, d4, mem) + v1.AddArg3(op3, d3, v2) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem))) @@ -20077,14 +19416,10 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) - v0.AddArg(mem) - v.AddArg(v0) + v0.AddArg3(op2, d2, mem) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem)))) @@ -20136,19 +19471,13 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v1.AddArg(op3) - v1.AddArg(d3) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) + v1.AddArg3(op3, d3, mem) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem))))) @@ -20213,24 +19542,16 @@ func rewriteValuegeneric_OpStore(v *Value) bool { } v.reset(OpStore) v.Aux = t1 - v.AddArg(op1) - v.AddArg(d1) v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v0.Aux = t2 - v0.AddArg(op2) - v0.AddArg(d2) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - 
v1.AddArg(op3) - v1.AddArg(d3) v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v2.Aux = t4 - v2.AddArg(op4) - v2.AddArg(d4) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + v2.AddArg3(op4, d4, mem) + v1.AddArg3(op3, d3, v2) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) return true } return false @@ -20433,8 +19754,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo()) v1.AuxInt = t.FieldOff(int(i)) v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) + v0.AddArg2(v1, mem) return true } // match: (StructSelect [0] (IData x)) @@ -20485,8 +19805,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(-c)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub16 (Mul16 x y) (Mul16 x z)) @@ -20514,11 +19833,9 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { } z := v_1_1 v.reset(OpMul16) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -20598,10 +19915,8 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { } v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub16 x (Sub16 z i:(Const16 ))) @@ -20623,11 +19938,9 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { break } v.reset(OpAdd16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub16 (Const16 [c]) (Sub16 x (Const16 [d]))) @@ -20651,8 +19964,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub16 (Const16 [c]) (Sub16 (Const16 [d]) x)) @@ 
-20675,8 +19987,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -20716,8 +20027,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(-c)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub32 (Mul32 x y) (Mul32 x z)) @@ -20745,11 +20055,9 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { } z := v_1_1 v.reset(OpMul32) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -20829,10 +20137,8 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { } v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub32 x (Sub32 z i:(Const32 ))) @@ -20854,11 +20160,9 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { break } v.reset(OpAdd32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub32 (Const32 [c]) (Sub32 x (Const32 [d]))) @@ -20882,8 +20186,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub32 (Const32 [c]) (Sub32 (Const32 [d]) x)) @@ -20906,8 +20209,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -20967,8 +20269,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 
-c - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub64 (Mul64 x y) (Mul64 x z)) @@ -20996,11 +20297,9 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { } z := v_1_1 v.reset(OpMul64) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -21080,10 +20379,8 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { } v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub64 x (Sub64 z i:(Const64 ))) @@ -21105,11 +20402,9 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { break } v.reset(OpAdd64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub64 (Const64 [c]) (Sub64 x (Const64 [d]))) @@ -21133,8 +20428,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub64 (Const64 [c]) (Sub64 (Const64 [d]) x)) @@ -21157,8 +20451,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -21218,8 +20511,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(-c)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub8 (Mul8 x y) (Mul8 x z)) @@ -21247,11 +20539,9 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { } z := v_1_1 v.reset(OpMul8) - v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(y, z) + v.AddArg2(x, v0) return true } } @@ -21331,10 +20621,8 @@ func 
rewriteValuegeneric_OpSub8(v *Value) bool { } v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) + v0.AddArg2(x, z) + v.AddArg2(v0, i) return true } // match: (Sub8 x (Sub8 z i:(Const8 ))) @@ -21356,11 +20644,9 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { break } v.reset(OpAdd8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) + v0.AddArg2(x, z) + v.AddArg2(i, v0) return true } // match: (Sub8 (Const8 [c]) (Sub8 x (Const8 [d]))) @@ -21384,8 +20670,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) @@ -21408,8 +20693,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } return false @@ -21981,11 +21265,9 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { continue } v.reset(OpXor16) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -22015,8 +21297,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { v.reset(OpXor16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22119,11 +21400,9 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { continue } v.reset(OpXor32) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -22153,8 +21432,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { v.reset(OpXor32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return 
true } } @@ -22257,11 +21535,9 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { continue } v.reset(OpXor64) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -22291,8 +21567,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { v.reset(OpXor64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c ^ d - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22395,11 +21670,9 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { continue } v.reset(OpXor8) - v.AddArg(i) v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v0.AddArg2(z, x) + v.AddArg2(i, v0) return true } } @@ -22429,8 +21702,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.reset(OpXor8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) + v.AddArg2(v0, x) return true } } @@ -22490,8 +21762,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t1 - v.AddArg(p1) - v.AddArg(mem) + v.AddArg2(p1, mem) return true } // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) @@ -22513,8 +21784,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) - v.AddArg(mem) + v.AddArg2(dst1, mem) return true } // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) @@ -22541,11 +21811,10 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v.reset(OpZero) v.AuxInt = n v.Aux = t - v.AddArg(dst1) v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) v0.Aux = x v0.AddArg(mem) - v.AddArg(v0) + v.AddArg2(dst1, v0) return true } return false diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index b877220211..8c5834d530 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -218,6 +218,58 @@ func (v *Value) AddArg(w *Value) { v.Args = 
append(v.Args, w) w.Uses++ } + +//go:noinline +func (v *Value) AddArg2(w1, w2 *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w1, w2) + w1.Uses++ + w2.Uses++ +} + +//go:noinline +func (v *Value) AddArg3(w1, w2, w3 *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w1, w2, w3) + w1.Uses++ + w2.Uses++ + w3.Uses++ +} + +//go:noinline +func (v *Value) AddArg4(w1, w2, w3, w4 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ +} + +//go:noinline +func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4, w5) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ + w5.Uses++ +} + +//go:noinline +func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4, w5, w6) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ + w5.Uses++ + w6.Uses++ +} + func (v *Value) AddArgs(a ...*Value) { if v.Args == nil { v.resetArgs() // use argstorage From e44cda3aa97ba0870806e65fc66641eb2cf6682a Mon Sep 17 00:00:00 2001 From: Richard Musiol Date: Sun, 1 Mar 2020 17:01:58 +0100 Subject: [PATCH 16/69] syscall: fix Fchdir on js/wasm NodeJS does not support fchdir so it has to be emulated with chdir by saving the path when opening a directory. However, if the path opened is relative, saving this path is not sufficient, because after changing the working directory the path does not resolve correctly any more, thus a subsequent fd.Chdir() fails. This change fixes the issue by resolving a relative path when opening the directory and saving the absolute path instead. 
Fixes #37448 Change-Id: Id6bc8c4232b0019fc11e850599a526336608ce54 Reviewed-on: https://go-review.googlesource.com/c/go/+/221717 Run-TryBot: Richard Musiol TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/os/os_test.go | 35 +++++++++++++++++++++++++++++++++++ src/syscall/fs_js.go | 4 ++++ 2 files changed, 39 insertions(+) diff --git a/src/os/os_test.go b/src/os/os_test.go index 1d8442d808..cc03b91d72 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -1242,6 +1242,41 @@ func testChtimes(t *testing.T, name string) { } } +func TestFileChdir(t *testing.T) { + // TODO(brainman): file.Chdir() is not implemented on windows. + if runtime.GOOS == "windows" { + return + } + + wd, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %s", err) + } + defer Chdir(wd) + + fd, err := Open(".") + if err != nil { + t.Fatalf("Open .: %s", err) + } + defer fd.Close() + + if err := Chdir("/"); err != nil { + t.Fatalf("Chdir /: %s", err) + } + + if err := fd.Chdir(); err != nil { + t.Fatalf("fd.Chdir: %s", err) + } + + wdNew, err := Getwd() + if err != nil { + t.Fatalf("Getwd: %s", err) + } + if wdNew != wd { + t.Fatalf("fd.Chdir failed, got %s, want %s", wdNew, wd) + } +} + func TestChdirAndGetwd(t *testing.T) { // TODO(brainman): file.Chdir() is not implemented on windows. if runtime.GOOS == "windows" { diff --git a/src/syscall/fs_js.go b/src/syscall/fs_js.go index c1cac97d91..262ec28afd 100644 --- a/src/syscall/fs_js.go +++ b/src/syscall/fs_js.go @@ -102,6 +102,10 @@ func Open(path string, openmode int, perm uint32) (int, error) { } } + if path[0] != '/' { + cwd := jsProcess.Call("cwd").String() + path = cwd + "/" + path + } f := &jsFile{ path: path, entries: entries, From a4f7b0879c8d5d67e8e186dc210d3c8e76589e78 Mon Sep 17 00:00:00 2001 From: Torben Schinke Date: Sun, 1 Mar 2020 20:07:46 +0000 Subject: [PATCH 17/69] syscall/js: improve documentation of js.FuncOf The existing documentation is improved to be more explicit about the lifecycle and its consequences. 
Fixes #34324 Change-Id: I9969afc69f6eeb7812c11fe821a842794df5aa5b GitHub-Last-Rev: 246a4991660927f88f48290580e96b15c16663c1 GitHub-Pull-Request: golang/go#34551 Reviewed-on: https://go-review.googlesource.com/c/go/+/197458 Reviewed-by: Richard Musiol --- src/syscall/js/func.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/syscall/js/func.go b/src/syscall/js/func.go index 6c145c9da6..9e99027e9f 100644 --- a/src/syscall/js/func.go +++ b/src/syscall/js/func.go @@ -22,17 +22,22 @@ type Func struct { id uint32 } -// FuncOf returns a wrapped function. +// FuncOf returns a function to be used by JavaScript. // -// Invoking the JavaScript function will synchronously call the Go function fn with the value of JavaScript's -// "this" keyword and the arguments of the invocation. -// The return value of the invocation is the result of the Go function mapped back to JavaScript according to ValueOf. +// The Go function fn is called with the value of JavaScript's "this" keyword and the +// arguments of the invocation. The return value of the invocation is +// the result of the Go function mapped back to JavaScript according to ValueOf. // -// A wrapped function triggered during a call from Go to JavaScript gets executed on the same goroutine. -// A wrapped function triggered by JavaScript's event loop gets executed on an extra goroutine. -// Blocking operations in the wrapped function will block the event loop. -// As a consequence, if one wrapped function blocks, other wrapped funcs will not be processed. -// A blocking function should therefore explicitly start a new goroutine. +// Invoking the wrapped Go function from JavaScript will +// pause the event loop and spawn a new goroutine. +// Other wrapped functions which are triggered during a call from Go to JavaScript +// get executed on the same goroutine. +// +// As a consequence, if one wrapped function blocks, JavaScript's event loop +// is blocked until that function returns. 
Hence, calling any async JavaScript +// API, which requires the event loop, like fetch (http.Client), will cause an +// immediate deadlock. Therefore a blocking function should explicitly start a +// new goroutine. // // Func.Release must be called to free up resources when the function will not be used any more. func FuncOf(fn func(this Value, args []Value) interface{}) Func { From 529988d62c1ffc3e5332231fc3e977858e5a2351 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 11 Feb 2020 17:49:52 -0800 Subject: [PATCH 18/69] os: seek should invalidate any cached directory reads When we seek on the underlying FD, discard any directory entries we've already read and cached. This makes sure we won't return the same entry twice. We already fixed this for Darwin in CL 209961. Fixes #37161 Change-Id: I20e1ac8d751443135e67fb4c43c18d69befb643b Reviewed-on: https://go-review.googlesource.com/c/go/+/219143 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke Reviewed-by: Brad Fitzpatrick --- src/os/dir_darwin.go | 10 ---------- src/os/dir_unix.go | 2 -- src/os/file_unix.go | 7 ++++++- src/os/os_test.go | 31 +++++++++++++++++++++++++++++++ src/os/testdata/issue37161/a | 1 + src/os/testdata/issue37161/b | 1 + src/os/testdata/issue37161/c | 1 + 7 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 src/os/testdata/issue37161/a create mode 100644 src/os/testdata/issue37161/b create mode 100644 src/os/testdata/issue37161/c diff --git a/src/os/dir_darwin.go b/src/os/dir_darwin.go index a274dd1268..2f9ba78d68 100644 --- a/src/os/dir_darwin.go +++ b/src/os/dir_darwin.go @@ -24,16 +24,6 @@ func (d *dirInfo) close() { d.dir = 0 } -func (f *File) seekInvalidate() { - if f.dirinfo == nil { - return - } - // Free cached dirinfo, so we allocate a new one if we - // access this file as a directory again. See #35767. 
- f.dirinfo.close() - f.dirinfo = nil -} - func (f *File) readdirnames(n int) (names []string, err error) { if f.dirinfo == nil { dir, call, errno := f.pfd.OpenDir() diff --git a/src/os/dir_unix.go b/src/os/dir_unix.go index 2856a2dc0f..e0c4989756 100644 --- a/src/os/dir_unix.go +++ b/src/os/dir_unix.go @@ -26,8 +26,6 @@ const ( func (d *dirInfo) close() {} -func (f *File) seekInvalidate() {} - func (f *File) readdirnames(n int) (names []string, err error) { // If this file has no dirinfo, create one. if f.dirinfo == nil { diff --git a/src/os/file_unix.go b/src/os/file_unix.go index 6945937fd6..32e4442e5d 100644 --- a/src/os/file_unix.go +++ b/src/os/file_unix.go @@ -295,7 +295,12 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) { // relative to the current offset, and 2 means relative to the end. // It returns the new offset and an error, if any. func (f *File) seek(offset int64, whence int) (ret int64, err error) { - f.seekInvalidate() + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. + f.dirinfo.close() + f.dirinfo = nil + } ret, err = f.pfd.Seek(offset, whence) runtime.KeepAlive(f) return ret, err diff --git a/src/os/os_test.go b/src/os/os_test.go index cc03b91d72..44e1434dbe 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -2496,3 +2496,34 @@ func TestDirSeek(t *testing.T) { } } } + +func TestReaddirSmallSeek(t *testing.T) { + // See issue 37161. Read only one entry from a directory, + // seek to the beginning, and read again. We should not see + // duplicate entries. 
+ if runtime.GOOS == "windows" { + testenv.SkipFlaky(t, 36019) + } + wd, err := Getwd() + if err != nil { + t.Fatal(err) + } + df, err := Open(filepath.Join(wd, "testdata", "issue37161")) + if err != nil { + t.Fatal(err) + } + names1, err := df.Readdirnames(1) + if err != nil { + t.Fatal(err) + } + if _, err = df.Seek(0, 0); err != nil { + t.Fatal(err) + } + names2, err := df.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + if len(names2) != 3 { + t.Fatalf("first names: %v, second names: %v", names1, names2) + } +} diff --git a/src/os/testdata/issue37161/a b/src/os/testdata/issue37161/a new file mode 100644 index 0000000000..7898192261 --- /dev/null +++ b/src/os/testdata/issue37161/a @@ -0,0 +1 @@ +a diff --git a/src/os/testdata/issue37161/b b/src/os/testdata/issue37161/b new file mode 100644 index 0000000000..6178079822 --- /dev/null +++ b/src/os/testdata/issue37161/b @@ -0,0 +1 @@ +b diff --git a/src/os/testdata/issue37161/c b/src/os/testdata/issue37161/c new file mode 100644 index 0000000000..f2ad6c76f0 --- /dev/null +++ b/src/os/testdata/issue37161/c @@ -0,0 +1 @@ +c From 4978f5e6ea0ba74264b562a3c9ee62dcb63aae45 Mon Sep 17 00:00:00 2001 From: Jean de Klerk Date: Sat, 29 Feb 2020 17:35:51 -0700 Subject: [PATCH 19/69] time: use values larger than 24 for day for time.Format examples Currently, the time.Format docs use 7 Mar 2015 as the day/month/year. In numeric form, that is either 7/3/2015 or 3/7/2015 depending on which part of the world you're from. This is extremely confusing. In fact, the reference time being defined in a very US-centric way is quite confusing for the rest of the world, too [1]. We can't change that, but we can make the time.Format docs more comprehendable to the rest of the world without sacrificing by simply choosing a day that is not ambiguous (a value greater than 24 for day). This CL does makes the necessary change. 
Note: this CL moves some of the padding examples into their own example, since those examples do need a <10 day to demonstrate padding. 1: Additional context: a very old golang-nuts thread in which Rob expresses some regret about the format being the USA standard, rather than the alternative: https://groups.google.com/forum/m/#!msg/golang-nuts/0nQbfyNzk9E/LWbMgpRQNOgJ. Change-Id: If0a07c5e0dab86f8420cbf59543405eb857aa7f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/221612 Run-TryBot: Jean de Klerk TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-by: Rob Pike --- src/time/example_test.go | 112 +++++++++++++++++++++++---------------- 1 file changed, 66 insertions(+), 46 deletions(-) diff --git a/src/time/example_test.go b/src/time/example_test.go index fe8e042d69..f272ee44df 100644 --- a/src/time/example_test.go +++ b/src/time/example_test.go @@ -206,7 +206,7 @@ func ExampleNewTicker() { func ExampleTime_Format() { // Parse a time value from a string in the standard Unix format. - t, err := time.Parse(time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") + t, err := time.Parse(time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") if err != nil { // Always check errors even if they should not happen. panic(err) } @@ -252,8 +252,70 @@ func ExampleTime_Format() { fmt.Printf("\nFormats:\n\n") // Simple starter examples. - do("Basic full date", "Mon Jan 2 15:04:05 MST 2006", "Sat Mar 7 11:06:39 PST 2015") - do("Basic short date", "2006/01/02", "2015/03/07") + do("Basic full date", "Mon Jan 2 15:04:05 MST 2006", "Wed Feb 25 11:06:39 PST 2015") + do("Basic short date", "2006/01/02", "2015/02/25") + + // The hour of the reference time is 15, or 3PM. The layout can express + // it either way, and since our value is the morning we should see it as + // an AM time. We show both in one format string. Lower case too. 
+ do("AM/PM", "3PM==3pm==15h", "11AM==11am==11h") + + // When parsing, if the seconds value is followed by a decimal point + // and some digits, that is taken as a fraction of a second even if + // the layout string does not represent the fractional second. + // Here we add a fractional second to our time value used above. + t, err = time.Parse(time.UnixDate, "Wed Feb 25 11:06:39.1234 PST 2015") + if err != nil { + panic(err) + } + // It does not appear in the output if the layout string does not contain + // a representation of the fractional second. + do("No fraction", time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") + + // Fractional seconds can be printed by adding a run of 0s or 9s after + // a decimal point in the seconds value in the layout string. + // If the layout digits are 0s, the fractional second is of the specified + // width. Note that the output has a trailing zero. + do("0s for fraction", "15:04:05.00000", "11:06:39.12340") + + // If the fraction in the layout is 9s, trailing zeros are dropped. + do("9s for fraction", "15:04:05.99999999", "11:06:39.1234") + + // Output: + // default format: 2015-02-25 11:06:39 -0800 PST + // Unix format: Wed Feb 25 11:06:39 PST 2015 + // Same, in UTC: Wed Feb 25 19:06:39 UTC 2015 + // + // Formats: + // + // Basic full date "Mon Jan 2 15:04:05 MST 2006" gives "Wed Feb 25 11:06:39 PST 2015" + // Basic short date "2006/01/02" gives "2015/02/25" + // AM/PM "3PM==3pm==15h" gives "11AM==11am==11h" + // No fraction "Mon Jan _2 15:04:05 MST 2006" gives "Wed Feb 25 11:06:39 PST 2015" + // 0s for fraction "15:04:05.00000" gives "11:06:39.12340" + // 9s for fraction "15:04:05.99999999" gives "11:06:39.1234" + +} + +func ExampleTime_Format_pad() { + // Parse a time value from a string in the standard Unix format. + t, err := time.Parse(time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") + if err != nil { // Always check errors even if they should not happen. 
+ panic(err) + } + + // Define a helper function to make the examples' output look nice. + do := func(name, layout, want string) { + got := t.Format(layout) + if want != got { + fmt.Printf("error: for %q got %q; expected %q\n", layout, got, want) + return + } + fmt.Printf("%-16s %q gives %q\n", name, layout, got) + } + + // The predefined constant Unix uses an underscore to pad the day. + do("Unix", time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") // For fixed-width printing of values, such as the date, that may be one or // two characters (7 vs. 07), use an _ instead of a space in the layout string. @@ -272,54 +334,12 @@ func ExampleTime_Format() { // so it doesn't need padding, but the minutes (04, 06) does. do("Suppressed pad", "04:05", "06:39") - // The predefined constant Unix uses an underscore to pad the day. - // Compare with our simple starter example. - do("Unix", time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") - - // The hour of the reference time is 15, or 3PM. The layout can express - // it either way, and since our value is the morning we should see it as - // an AM time. We show both in one format string. Lower case too. - do("AM/PM", "3PM==3pm==15h", "11AM==11am==11h") - - // When parsing, if the seconds value is followed by a decimal point - // and some digits, that is taken as a fraction of a second even if - // the layout string does not represent the fractional second. - // Here we add a fractional second to our time value used above. - t, err = time.Parse(time.UnixDate, "Sat Mar 7 11:06:39.1234 PST 2015") - if err != nil { - panic(err) - } - // It does not appear in the output if the layout string does not contain - // a representation of the fractional second. - do("No fraction", time.UnixDate, "Sat Mar 7 11:06:39 PST 2015") - - // Fractional seconds can be printed by adding a run of 0s or 9s after - // a decimal point in the seconds value in the layout string. - // If the layout digits are 0s, the fractional second is of the specified - // width. 
Note that the output has a trailing zero. - do("0s for fraction", "15:04:05.00000", "11:06:39.12340") - - // If the fraction in the layout is 9s, trailing zeros are dropped. - do("9s for fraction", "15:04:05.99999999", "11:06:39.1234") - // Output: - // default format: 2015-03-07 11:06:39 -0800 PST - // Unix format: Sat Mar 7 11:06:39 PST 2015 - // Same, in UTC: Sat Mar 7 19:06:39 UTC 2015 - // - // Formats: - // - // Basic full date "Mon Jan 2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" - // Basic short date "2006/01/02" gives "2015/03/07" + // Unix "Mon Jan _2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" // No pad "<2>" gives "<7>" // Spaces "<_2>" gives "< 7>" // Zeros "<02>" gives "<07>" // Suppressed pad "04:05" gives "06:39" - // Unix "Mon Jan _2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" - // AM/PM "3PM==3pm==15h" gives "11AM==11am==11h" - // No fraction "Mon Jan _2 15:04:05 MST 2006" gives "Sat Mar 7 11:06:39 PST 2015" - // 0s for fraction "15:04:05.00000" gives "11:06:39.12340" - // 9s for fraction "15:04:05.99999999" gives "11:06:39.1234" } From c7a59a99e3fa8d85cac0d638e251438238f18503 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sun, 1 Mar 2020 17:40:35 -0800 Subject: [PATCH 20/69] os: plan9 seek() should invalidate cached directory info Update #37161 Change-Id: Iee828bbcc8436af29ca6dd9ed897cb5265a57cf8 Reviewed-on: https://go-review.googlesource.com/c/go/+/221778 Run-TryBot: Keith Randall Reviewed-by: Emmanuel Odeke --- src/os/file_plan9.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go index 48bf5f5076..bcf3c625bf 100644 --- a/src/os/file_plan9.go +++ b/src/os/file_plan9.go @@ -290,6 +290,11 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) { // relative to the current offset, and 2 means relative to the end. // It returns the new offset and an error, if any. 
func (f *File) seek(offset int64, whence int) (ret int64, err error) { + if f.dirinfo != nil { + // Free cached dirinfo, so we allocate a new one if we + // access this file as a directory again. See #35767 and #37161. + f.dirinfo = nil + } return syscall.Seek(f.fd, offset, whence) } From b79acf97c79c63779acc77062eef70511a42be9b Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 2 Mar 2020 09:31:44 +0100 Subject: [PATCH 21/69] internal/cpu: use anonymous struct for CPU feature vars MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like in x/sys/cpu, use anonymous structs to declare the CPU feature vars instead of defining single-use types. Also, order the vars alphabetically. Change-Id: Iedd3ca51916e3cbb852d2aeed18b3a4c6613e778 Reviewed-on: https://go-review.googlesource.com/c/go/+/221757 Reviewed-by: Ian Lance Taylor Reviewed-by: Martin Möhrmann --- src/internal/cpu/cpu.go | 64 +++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index 84df6472eb..2829945af0 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -19,13 +19,11 @@ type CacheLinePad struct{ _ [CacheLinePadSize]byte } // so we use the constant per GOARCH CacheLinePadSize as an approximation. var CacheLineSize uintptr = CacheLinePadSize -var X86 x86 - -// The booleans in x86 contain the correspondingly named cpuid feature bit. +// The booleans in X86 contain the correspondingly named cpuid feature bit. // HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. 
-type x86 struct { +var X86 struct { _ CacheLinePad HasAES bool HasADX bool @@ -46,38 +44,18 @@ type x86 struct { _ CacheLinePad } -var PPC64 ppc64 - -// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (darn, scv), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The booleans in ARM contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. -type ppc64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) - _ CacheLinePad -} - -var ARM arm - -// The booleans in arm contain the correspondingly named cpu feature bit. -// The struct is padded to avoid false sharing. -type arm struct { +var ARM struct { _ CacheLinePad HasVFPv4 bool HasIDIVA bool _ CacheLinePad } -var ARM64 arm64 - -// The booleans in arm64 contain the correspondingly named cpu feature bit. +// The booleans in ARM64 contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. -type arm64 struct { +var ARM64 struct { _ CacheLinePad HasFP bool HasASIMD bool @@ -106,9 +84,27 @@ type arm64 struct { _ CacheLinePad } -var S390X s390x +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} -type s390x struct { +// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (darn, scv), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. 
+var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +var S390X struct { _ CacheLinePad HasZARCH bool // z architecture mode is active [mandatory] HasSTFLE bool // store facility list extended [mandatory] @@ -134,14 +130,6 @@ type s390x struct { _ CacheLinePad } -var MIPS64X mips64x - -type mips64x struct { - _ CacheLinePad - HasMSA bool // MIPS SIMD architecture - _ CacheLinePad -} - // Initialize examines the processor and sets the relevant variables above. // This is called by the runtime package early in program initialization, // before normal init functions are run. env is set by runtime if the OS supports From acac535c3ca571beeb168c953d6d672f61387ef1 Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Sun, 16 Feb 2020 01:11:53 +0100 Subject: [PATCH 22/69] doc: race condition in unsynchronized send/close This CL documents that unsynchronized send and close operations on a channel are detected as a race condition. Fixes #27769 Change-Id: I7495a2d0dd834c3f3b6339f8ca18ea21ae979aa8 Reviewed-on: https://go-review.googlesource.com/c/go/+/219637 Reviewed-by: Rob Pike --- doc/articles/race_detector.html | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/doc/articles/race_detector.html b/doc/articles/race_detector.html index 014411d948..63a658f870 100644 --- a/doc/articles/race_detector.html +++ b/doc/articles/race_detector.html @@ -379,6 +379,38 @@ func (w *Watchdog) Start() { } +

Unsynchronized send and close operations

+ +

+As this example demonstrates, unsynchronized send and close operations +on the same channel can also be a race condition: +

+ +
+c := make(chan struct{}) // or buffered channel
+
+// The race detector cannot derive the happens before relation
+// for the following send and close operations. These two operations
+// are unsynchronized and happen concurrently.
+go func() { c <- struct{}{} }()
+close(c)
+
+ +

+According to the Go memory model, a send on a channel happens before +the corresponding receive from that channel completes. To synchronize +send and close operations, use a receive operation that guarantees +the send is done before the close: +

+ +
+c := make(chan struct{}) // or buffered channel
+
+go func() { c <- struct{}{} }()
+<-c
+close(c)
+
+

Supported Systems

From 12d02e7d8e7df75ccbf07ec40028329fcc35c55b Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Mon, 2 Mar 2020 10:16:39 -0500 Subject: [PATCH 23/69] net/http: verify RoundTripper invariants in the send function Issue #37598 reports a nil-panic in *Client.send that can only occur if one of the RoundTripper invariants is violated. Unfortunately, that condition is currently difficult to diagnose: it manifests as a panic during a Response field access, rather than something the user can easily associate with an specific erroneous RoundTripper implementation. No test because the new code paths are supposed to be unreachable. Updates #37598 Change-Id: If0451e9c6431f6fab7137de43727297a80def05b Reviewed-on: https://go-review.googlesource.com/c/go/+/221818 Reviewed-by: Brad Fitzpatrick --- src/net/http/client.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/net/http/client.go b/src/net/http/client.go index a496f1c0c7..ec24516833 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -265,6 +265,12 @@ func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, d } return nil, didTimeout, err } + if resp == nil { + return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt) + } + if resp.Body == nil { + return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with a nil Body", rt) + } if !deadline.IsZero() { resp.Body = &cancelTimerBody{ stop: stopTimer, From ab7ecea0c8dff908dfcad8c9091b71c6051bb94a Mon Sep 17 00:00:00 2001 From: Meng Zhuo Date: Sat, 29 Feb 2020 11:23:29 +0800 Subject: [PATCH 24/69] cmd/compile: add intrinsics for runtime/internal/math on MIPS64x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit name old time/op new time/op delta MulUintptr/small 8.42ns ± 0% 5.93ns ± 0% -29.66% (p=0.000 n=9+10) MulUintptr/large 11.1ns ± 0% 7.4ns ± 0% -33.17% (p=0.000 n=10+9) Change-Id: 
I6659a886389660461fc2c90bd248243f6e7c29d5 Reviewed-on: https://go-review.googlesource.com/c/go/+/210897 Run-TryBot: Meng Zhuo TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/ssa/gen/MIPS64.rules | 2 + src/cmd/compile/internal/ssa/rewriteMIPS64.go | 41 +++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index a89af236f4..f2a472bde6 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -3269,7 +3269,7 @@ func init() { } return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1]) }, - sys.AMD64, sys.I386) + sys.AMD64, sys.I386, sys.MIPS64) add("runtime", "KeepAlive", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules index 6df2b3e6b9..be05dc71c0 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules @@ -11,6 +11,8 @@ (Mul(64|32|16|8) x y) -> (Select1 (MULVU x y)) (Mul(32|64)F ...) -> (MUL(F|D) ...) (Mul64uhilo ...) -> (MULVU ...) 
+(Select0 (Mul64uover x y)) -> (Select1 (MULVU x y)) +(Select1 (Mul64uover x y)) -> (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) (Hmul64 x y) -> (Select0 (MULV x y)) (Hmul64u x y) -> (Select0 (MULVU x y)) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 6736fcd560..346fa6f28e 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -6927,6 +6927,24 @@ func rewriteValueMIPS64_OpRsh8x8(v *Value) bool { } func rewriteValueMIPS64_OpSelect0(v *Value) bool { v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select1 (MULVU x y)) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect1) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } // match: (Select0 (DIVVU _ (MOVVconst [1]))) // result: (MOVVconst [0]) for { @@ -7010,6 +7028,29 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { } func rewriteValueMIPS64_OpSelect1(v *Value) bool { v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPS64SGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } // match: (Select1 (MULVU x (MOVVconst [-1]))) // result: (NEGV x) for { From d7c073ecbfc3ecc506bfc753b271973b47f8bc15 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Wed, 30 Oct 2019 10:29:47 -0700 
Subject: [PATCH 25/69] cmd/compile: add specialized Value reset for OpCopy This: * Simplifies and shortens the generated code for rewrite rules. * Shrinks cmd/compile by 86k (0.4%) and makes it easier to compile. * Removes the stmt boundary code wrangling from Value.reset, in favor of doing it in the one place where it actually does some work, namely the writebarrier pass. (This was ascertained by inspecting the code for cases in which notStmtBoundary values were generated.) Passes toolstash-check -all. Change-Id: I25671d4c4bbd772f235195d11da090878ea2cc07 Reviewed-on: https://go-review.googlesource.com/c/go/+/221421 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/gen/rulegen.go | 7 +- src/cmd/compile/internal/ssa/numberlines.go | 9 +- src/cmd/compile/internal/ssa/rewrite386.go | 126 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 961 +++++------------- src/cmd/compile/internal/ssa/rewriteARM.go | 132 +-- src/cmd/compile/internal/ssa/rewriteARM64.go | 230 ++--- src/cmd/compile/internal/ssa/rewriteMIPS.go | 80 +- src/cmd/compile/internal/ssa/rewriteMIPS64.go | 44 +- src/cmd/compile/internal/ssa/rewritePPC64.go | 368 ++----- .../compile/internal/ssa/rewriteRISCV64.go | 12 +- src/cmd/compile/internal/ssa/rewriteS390X.go | 415 +++----- src/cmd/compile/internal/ssa/rewriteWasm.go | 60 +- src/cmd/compile/internal/ssa/rewritedec.go | 36 +- src/cmd/compile/internal/ssa/rewritedec64.go | 12 +- .../compile/internal/ssa/rewritegeneric.go | 527 +++------- src/cmd/compile/internal/ssa/value.go | 20 +- src/cmd/compile/internal/ssa/writebarrier.go | 1 + 17 files changed, 820 insertions(+), 2220 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 0fba0546e7..53c6bdbf65 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -1093,9 +1093,7 @@ func genResult0(rr *RuleRewrite, arch arch, 
result string, top, move bool, pos s // It in not safe in general to move a variable between blocks // (and particularly not a phi node). // Introduce a copy. - rr.add(stmtf("v.reset(OpCopy)")) - rr.add(stmtf("v.Type = %s.Type", result)) - rr.add(stmtf("v.AddArg(%s)", result)) + rr.add(stmtf("v.copyOf(%s)", result)) } return result } @@ -1123,8 +1121,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s rr.add(declf(v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ)) if move && top { // Rewrite original into a copy - rr.add(stmtf("v.reset(OpCopy)")) - rr.add(stmtf("v.AddArg(%s)", v)) + rr.add(stmtf("v.copyOf(%s)", v)) } } diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go index 3d77fe5bb4..f4e62b88c4 100644 --- a/src/cmd/compile/internal/ssa/numberlines.go +++ b/src/cmd/compile/internal/ssa/numberlines.go @@ -66,12 +66,9 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int { return i } -// notStmtBoundary indicates which value opcodes can never be a statement -// boundary because they don't correspond to a user's understanding of a -// statement boundary. Called from *Value.reset(), and *Func.newValue(), -// located here to keep all the statement boundary heuristics in one place. -// Note: *Value.reset() filters out OpCopy because of how that is used in -// rewrite. +// notStmtBoundary reports whether a value with opcode op can never be a statement +// boundary. Such values don't correspond to a user's understanding of a +// statement boundary. 
func notStmtBoundary(op Op) bool { switch op { case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg: diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 7a75b7121f..bff76e9029 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -1235,9 +1235,7 @@ func rewriteValue386_Op386ADDLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDLconst [c] (MOVLconst [d])) @@ -1950,9 +1948,7 @@ func rewriteValue386_Op386ANDL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -1994,9 +1990,7 @@ func rewriteValue386_Op386ANDLconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDLconst [c] (MOVLconst [d])) @@ -2706,8 +2700,7 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -3015,8 +3008,7 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -3309,8 +3301,7 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -4054,8 +4045,7 @@ func rewriteValue386_Op386MOVBLSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off 
v0.Aux = sym v0.AddArg2(ptr, mem) @@ -4151,8 +4141,7 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -4176,8 +4165,7 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, Op386MOVBloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -5127,9 +5115,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -7495,8 +7481,7 @@ func rewriteValue386_Op386MOVWLSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -7592,8 +7577,7 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -7617,8 +7601,7 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -7642,8 +7625,7 @@ func rewriteValue386_Op386MOVWLZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, Op386MOVWloadidx2, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -8899,9 +8881,7 @@ func rewriteValue386_Op386MULLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLconst [3] x) @@ -9750,9 +9730,7 @@ func rewriteValue386_Op386ORL(v 
*Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORL x0:(MOVBload [i0] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) @@ -9786,8 +9764,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -9850,8 +9827,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -9898,8 +9874,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -9974,8 +9949,7 @@ func rewriteValue386_Op386ORL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -10000,9 +9974,7 @@ func rewriteValue386_Op386ORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORLconst [c] _) @@ -10489,9 +10461,7 @@ func rewriteValue386_Op386ROLBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -10519,9 +10489,7 @@ func rewriteValue386_Op386ROLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -10549,9 +10517,7 @@ func rewriteValue386_Op386ROLWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -10583,9 +10549,7 @@ func 
rewriteValue386_Op386SARBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARBconst [c] (MOVLconst [d])) @@ -10641,9 +10605,7 @@ func rewriteValue386_Op386SARLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARLconst [c] (MOVLconst [d])) @@ -10687,9 +10649,7 @@ func rewriteValue386_Op386SARWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARWconst [c] (MOVLconst [d])) @@ -11469,9 +11429,7 @@ func rewriteValue386_Op386SHLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11522,9 +11480,7 @@ func rewriteValue386_Op386SHRBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11568,9 +11524,7 @@ func rewriteValue386_Op386SHRLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11621,9 +11575,7 @@ func rewriteValue386_Op386SHRWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11747,9 +11699,7 @@ func rewriteValue386_Op386SUBLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBLconst [c] x) @@ -12419,9 +12369,7 @@ func rewriteValue386_Op386XORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORLconst [c] (MOVLconst [d])) @@ -14043,9 +13991,7 @@ func rewriteValue386_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) 
@@ -15773,9 +15719,7 @@ func rewriteValue386_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 566a7aaf66..a30c609a68 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1705,9 +1705,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDLconst [c] (MOVLconst [d])) @@ -2324,9 +2322,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDQconst [c] (MOVQconst [d])) @@ -2824,9 +2820,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) @@ -2943,9 +2937,7 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDLconst [c] (MOVLconst [d])) @@ -3208,9 +3200,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) @@ -3334,9 +3324,7 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDQconst [c] (MOVQconst [d])) @@ -4725,9 +4713,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: 
(CMOVLCC _ x (FlagGT_UGT)) @@ -4737,9 +4723,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCC y _ (FlagGT_ULT)) @@ -4749,9 +4733,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCC y _ (FlagLT_ULT)) @@ -4761,9 +4743,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCC _ x (FlagLT_UGT)) @@ -4773,9 +4753,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -4804,9 +4782,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCS y _ (FlagGT_UGT)) @@ -4816,9 +4792,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLCS _ x (FlagGT_ULT)) @@ -4828,9 +4802,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCS _ x (FlagLT_ULT)) @@ -4840,9 +4812,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLCS y _ (FlagLT_UGT)) @@ -4852,9 +4822,7 @@ func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) 
- v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -4883,9 +4851,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLEQ y _ (FlagGT_UGT)) @@ -4895,9 +4861,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLEQ y _ (FlagGT_ULT)) @@ -4907,9 +4871,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLEQ y _ (FlagLT_ULT)) @@ -4919,9 +4881,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLEQ y _ (FlagLT_UGT)) @@ -4931,9 +4891,7 @@ func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -4962,9 +4920,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGE _ x (FlagGT_UGT)) @@ -4974,9 +4930,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGE _ x (FlagGT_ULT)) @@ -4986,9 +4940,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGE y _ (FlagLT_ULT)) @@ -4998,9 +4950,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != 
OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLGE y _ (FlagLT_UGT)) @@ -5010,9 +4960,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5041,9 +4989,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLGT _ x (FlagGT_UGT)) @@ -5053,9 +4999,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGT _ x (FlagGT_ULT)) @@ -5065,9 +5009,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLGT y _ (FlagLT_ULT)) @@ -5077,9 +5019,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLGT y _ (FlagLT_UGT)) @@ -5089,9 +5029,7 @@ func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5120,9 +5058,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLHI _ x (FlagGT_UGT)) @@ -5132,9 +5068,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLHI y _ (FlagGT_ULT)) @@ -5144,9 +5078,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLHI y _ (FlagLT_ULT)) @@ -5156,9 +5088,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLHI _ x (FlagLT_UGT)) @@ -5168,9 +5098,7 @@ func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5199,9 +5127,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLE y _ (FlagGT_UGT)) @@ -5211,9 +5137,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLE y _ (FlagGT_ULT)) @@ -5223,9 +5147,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLE _ x (FlagLT_ULT)) @@ -5235,9 +5157,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLE _ x (FlagLT_UGT)) @@ -5247,9 +5167,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5278,9 +5196,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLS y _ 
(FlagGT_UGT)) @@ -5290,9 +5206,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLS _ x (FlagGT_ULT)) @@ -5302,9 +5216,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLS _ x (FlagLT_ULT)) @@ -5314,9 +5226,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLS y _ (FlagLT_UGT)) @@ -5326,9 +5236,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5357,9 +5265,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLT y _ (FlagGT_UGT)) @@ -5369,9 +5275,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLT y _ (FlagGT_ULT)) @@ -5381,9 +5285,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLLT _ x (FlagLT_ULT)) @@ -5393,9 +5295,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLLT _ x (FlagLT_UGT)) @@ -5405,9 +5305,7 @@ func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5436,9 +5334,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVLNE _ x (FlagGT_UGT)) @@ -5448,9 +5344,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLNE _ x (FlagGT_ULT)) @@ -5460,9 +5354,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLNE _ x (FlagLT_ULT)) @@ -5472,9 +5364,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVLNE _ x (FlagLT_UGT)) @@ -5484,9 +5374,7 @@ func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5515,9 +5403,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCC _ x (FlagGT_UGT)) @@ -5527,9 +5413,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCC y _ (FlagGT_ULT)) @@ -5539,9 +5423,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCC y _ (FlagLT_ULT)) @@ -5551,9 +5433,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != 
OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCC _ x (FlagLT_UGT)) @@ -5563,9 +5443,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5594,9 +5472,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCS y _ (FlagGT_UGT)) @@ -5606,9 +5482,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQCS _ x (FlagGT_ULT)) @@ -5618,9 +5492,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCS _ x (FlagLT_ULT)) @@ -5630,9 +5502,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQCS y _ (FlagLT_UGT)) @@ -5642,9 +5512,7 @@ func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5673,9 +5541,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQEQ y _ (FlagGT_UGT)) @@ -5685,9 +5551,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ y _ (FlagGT_ULT)) @@ -5697,9 +5561,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ y _ (FlagLT_ULT)) @@ -5709,9 +5571,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ y _ (FlagLT_UGT)) @@ -5721,9 +5581,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) @@ -5746,9 +5604,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if !(c != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5777,9 +5633,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGE _ x (FlagGT_UGT)) @@ -5789,9 +5643,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGE _ x (FlagGT_ULT)) @@ -5801,9 +5653,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGE y _ (FlagLT_ULT)) @@ -5813,9 +5663,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQGE y _ (FlagLT_UGT)) @@ -5825,9 +5673,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) 
return true } return false @@ -5856,9 +5702,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQGT _ x (FlagGT_UGT)) @@ -5868,9 +5712,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGT _ x (FlagGT_ULT)) @@ -5880,9 +5722,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQGT y _ (FlagLT_ULT)) @@ -5892,9 +5732,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQGT y _ (FlagLT_UGT)) @@ -5904,9 +5742,7 @@ func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -5935,9 +5771,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQHI _ x (FlagGT_UGT)) @@ -5947,9 +5781,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQHI y _ (FlagGT_ULT)) @@ -5959,9 +5791,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQHI y _ (FlagLT_ULT)) @@ -5971,9 +5801,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - 
v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQHI _ x (FlagLT_UGT)) @@ -5983,9 +5811,7 @@ func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6014,9 +5840,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLE y _ (FlagGT_UGT)) @@ -6026,9 +5850,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLE y _ (FlagGT_ULT)) @@ -6038,9 +5860,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLE _ x (FlagLT_ULT)) @@ -6050,9 +5870,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLE _ x (FlagLT_UGT)) @@ -6062,9 +5880,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6093,9 +5909,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLS y _ (FlagGT_UGT)) @@ -6105,9 +5919,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLS _ x (FlagGT_ULT)) @@ -6117,9 +5929,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != 
OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLS _ x (FlagLT_ULT)) @@ -6129,9 +5939,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLS y _ (FlagLT_UGT)) @@ -6141,9 +5949,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6172,9 +5978,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLT y _ (FlagGT_UGT)) @@ -6184,9 +5988,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLT y _ (FlagGT_ULT)) @@ -6196,9 +5998,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQLT _ x (FlagLT_ULT)) @@ -6208,9 +6008,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQLT _ x (FlagLT_UGT)) @@ -6220,9 +6018,7 @@ func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6251,9 +6047,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVQNE _ x (FlagGT_UGT)) @@ -6263,9 +6057,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQNE _ x (FlagGT_ULT)) @@ -6275,9 +6067,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQNE _ x (FlagLT_ULT)) @@ -6287,9 +6077,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVQNE _ x (FlagLT_UGT)) @@ -6299,9 +6087,7 @@ func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6330,9 +6116,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCC _ x (FlagGT_UGT)) @@ -6342,9 +6126,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCC y _ (FlagGT_ULT)) @@ -6354,9 +6136,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCC y _ (FlagLT_ULT)) @@ -6366,9 +6146,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCC _ x (FlagLT_UGT)) @@ -6378,9 +6156,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return 
true } return false @@ -6409,9 +6185,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCS y _ (FlagGT_UGT)) @@ -6421,9 +6195,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWCS _ x (FlagGT_ULT)) @@ -6433,9 +6205,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCS _ x (FlagLT_ULT)) @@ -6445,9 +6215,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWCS y _ (FlagLT_UGT)) @@ -6457,9 +6225,7 @@ func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6488,9 +6254,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWEQ y _ (FlagGT_UGT)) @@ -6500,9 +6264,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWEQ y _ (FlagGT_ULT)) @@ -6512,9 +6274,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWEQ y _ (FlagLT_ULT)) @@ -6524,9 +6284,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = 
y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWEQ y _ (FlagLT_UGT)) @@ -6536,9 +6294,7 @@ func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6567,9 +6323,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGE _ x (FlagGT_UGT)) @@ -6579,9 +6333,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGE _ x (FlagGT_ULT)) @@ -6591,9 +6343,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGE y _ (FlagLT_ULT)) @@ -6603,9 +6353,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWGE y _ (FlagLT_UGT)) @@ -6615,9 +6363,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6646,9 +6392,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWGT _ x (FlagGT_UGT)) @@ -6658,9 +6402,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGT _ x (FlagGT_ULT)) @@ -6670,9 +6412,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != 
OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWGT y _ (FlagLT_ULT)) @@ -6682,9 +6422,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWGT y _ (FlagLT_UGT)) @@ -6694,9 +6432,7 @@ func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -6725,9 +6461,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWHI _ x (FlagGT_UGT)) @@ -6737,9 +6471,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWHI y _ (FlagGT_ULT)) @@ -6749,9 +6481,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWHI y _ (FlagLT_ULT)) @@ -6761,9 +6491,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWHI _ x (FlagLT_UGT)) @@ -6773,9 +6501,7 @@ func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6804,9 +6530,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLE y _ (FlagGT_UGT)) @@ -6816,9 +6540,7 @@ func 
rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLE y _ (FlagGT_ULT)) @@ -6828,9 +6550,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLE _ x (FlagLT_ULT)) @@ -6840,9 +6560,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLE _ x (FlagLT_UGT)) @@ -6852,9 +6570,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6883,9 +6599,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLS y _ (FlagGT_UGT)) @@ -6895,9 +6609,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLS _ x (FlagGT_ULT)) @@ -6907,9 +6619,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLS _ x (FlagLT_ULT)) @@ -6919,9 +6629,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLS y _ (FlagLT_UGT)) @@ -6931,9 +6639,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return 
true } return false @@ -6962,9 +6668,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLT y _ (FlagGT_UGT)) @@ -6974,9 +6678,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLT y _ (FlagGT_ULT)) @@ -6986,9 +6688,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWLT _ x (FlagLT_ULT)) @@ -6998,9 +6698,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLT _ x (FlagLT_UGT)) @@ -7010,9 +6708,7 @@ func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -7041,9 +6737,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CMOVWNE _ x (FlagGT_UGT)) @@ -7053,9 +6747,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWNE _ x (FlagGT_ULT)) @@ -7065,9 +6757,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWNE _ x (FlagLT_ULT)) @@ -7077,9 +6767,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWNE _ x (FlagLT_UGT)) @@ -7089,9 +6777,7 @@ func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { if v_2.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -7338,8 +7024,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -7724,8 +7409,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -8290,8 +7974,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -8661,8 +8344,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(c, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -9964,8 +9646,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -9988,8 +9669,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10012,8 +9692,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, 
OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10036,8 +9715,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10142,8 +9820,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10166,8 +9843,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10190,8 +9866,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10214,8 +9889,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10229,9 +9903,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if !(zeroUpper56Bits(x, 3)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) @@ -10252,8 +9924,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -12279,8 +11950,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) - v.reset(OpCopy) - 
v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -12303,8 +11973,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -12431,8 +12100,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -12455,8 +12123,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -12470,9 +12137,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { if !(zeroUpper32Bits(x, 3)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) @@ -12493,8 +12158,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -12518,8 +12182,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -12640,8 +12303,7 @@ func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -12667,8 +12329,7 @@ func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return 
true @@ -15152,8 +14813,7 @@ func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -15179,8 +14839,7 @@ func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool { } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -15210,9 +14869,7 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) @@ -18082,8 +17739,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -18106,8 +17762,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -18130,8 +17785,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -18247,8 +17901,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -18271,8 +17924,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -18295,8 +17947,7 @@ func 
rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -18310,9 +17961,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { if !(zeroUpper48Bits(x, 3)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) @@ -18333,8 +17982,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -18358,8 +18006,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg3(ptr, idx, mem) @@ -20132,9 +19779,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLconst [ 3] x) @@ -20587,9 +20232,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULQconst [ 3] x) @@ -21163,9 +20806,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (NEGL s:(SUBL x y)) @@ -21207,9 +20848,7 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (NEGQ s:(SUBQ x y)) @@ -22050,9 +21689,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORL x0:(MOVBload 
[i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) @@ -22086,8 +21723,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -22126,8 +21762,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -22184,8 +21819,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) @@ -22237,8 +21871,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -22286,8 +21919,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -22353,8 +21985,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) @@ -22401,8 +22032,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 
v1.AuxInt = i0 @@ -22452,8 +22082,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s @@ -22512,8 +22141,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -22568,8 +22196,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v1.AuxInt = i0 @@ -22628,8 +22255,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v1.AuxInt = i0 v1.Aux = s @@ -22697,8 +22323,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -22797,9 +22422,7 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORLconst [c] _) @@ -23330,9 +22953,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) @@ 
-23366,8 +22987,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -23406,8 +23026,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -23446,8 +23065,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -23504,8 +23122,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) @@ -23568,8 +23185,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) @@ -23621,8 +23237,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -23670,8 +23285,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -23719,8 +23333,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = 
mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -23786,8 +23399,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) @@ -23859,8 +23471,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) @@ -23907,8 +23518,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 @@ -23958,8 +23568,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s @@ -24008,8 +23617,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) v1.AuxInt = i0 v1.Aux = s @@ -24068,8 +23676,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) @@ 
-24143,8 +23750,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) @@ -24198,8 +23804,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 8 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) v1.AuxInt = i0 @@ -24258,8 +23863,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) v1.AuxInt = i0 v1.Aux = s @@ -24317,8 +23921,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) v1.AuxInt = i0 v1.Aux = s @@ -24386,8 +23989,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) @@ -24470,8 +24072,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) @@ -24567,9 +24168,7 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: 
(ORQconst [-1] _) @@ -24847,9 +24446,7 @@ func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -24932,9 +24529,7 @@ func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -25017,9 +24612,7 @@ func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -25102,9 +24695,7 @@ func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -25369,9 +24960,7 @@ func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARBconst [c] (MOVQconst [d])) @@ -25593,9 +25182,7 @@ func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARLconst [c] (MOVQconst [d])) @@ -25817,9 +25404,7 @@ func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARQconst [c] (MOVQconst [d])) @@ -25876,9 +25461,7 @@ func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SARWconst [c] (MOVQconst [d])) @@ -30170,9 +29753,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SHLLconst [d] (MOVLconst [c])) @@ -30406,9 +29987,7 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } // match: (SHLQconst [d] (MOVQconst [c])) @@ -30515,9 +30094,7 @@ func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -30739,9 +30316,7 @@ func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -30963,9 +30538,7 @@ func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -31048,9 +30621,7 @@ func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -31133,9 +30704,7 @@ func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBLconst [c] x) @@ -31380,9 +30949,7 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBQconst [c] x) @@ -31797,8 +31364,7 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -31866,8 +31432,7 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -31949,8 +31514,7 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + 
v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -32028,8 +31592,7 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = makeValAndOff(0, off) v0.Aux = sym v0.AddArg2(ptr, mem) @@ -32549,9 +32112,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORLconst [c] (MOVLconst [d])) @@ -32916,9 +32477,7 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORQconst [c] (MOVQconst [d])) @@ -35955,9 +35514,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -38272,9 +37829,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool { break } x := v_0_0_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Select1 (AddTupleFirst32 _ tuple)) @@ -38438,9 +37993,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 5be3e34dcb..bf1cf2d183 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -1938,9 +1938,7 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] x) @@ -2440,9 +2438,7 @@ func rewriteValueARM_OpARMAND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type 
- v.AddArg(x) + v.copyOf(x) return true } // match: (AND x (MVN y)) @@ -2534,9 +2530,7 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] x) @@ -2644,9 +2638,7 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2737,9 +2729,7 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2830,9 +2820,7 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -3025,9 +3013,7 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (BICconst [c] _) @@ -3723,9 +3709,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { if v_1.Op != OpARMFlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWHSconst _ (FlagLT_UGT) [c]) @@ -3746,9 +3730,7 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { if v_1.Op != OpARMFlagGT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWHSconst _ (FlagGT_UGT) [c]) @@ -3810,9 +3792,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { if v_1.Op != OpARMFlagLT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CMOVWLSconst _ (FlagGT_ULT) [c]) @@ -3833,9 +3813,7 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { if v_1.Op != OpARMFlagGT_UGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) 
return true } // match: (CMOVWLSconst x (InvertFlags flags) [c]) @@ -5779,9 +5757,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -5928,9 +5904,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -6747,9 +6721,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWload [0] {sym} (ADD ptr idx) mem) @@ -6879,9 +6851,7 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { if !(isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadidx ptr (MOVWconst [c]) mem) @@ -7028,9 +6998,7 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool { if !(c == d && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) @@ -7074,9 +7042,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool { if !(c == d && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) @@ -7120,9 +7086,7 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool { if !(c == d && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) @@ -7575,9 +7539,7 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { continue } - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } break @@ -7783,9 +7745,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULA x (MOVWconst [1]) a) @@ -7976,9 +7936,7 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULA (MOVWconst [1]) x a) @@ -8241,9 +8199,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULS x (MOVWconst [1]) a) @@ -8434,9 +8390,7 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { break } a := v_2 - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MULS (MOVWconst [1]) x a) @@ -9084,9 +9038,7 @@ func rewriteValueARM_OpARMOR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -9100,9 +9052,7 @@ func rewriteValueARM_OpARMORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [c] _) @@ -9244,9 +9194,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -9337,9 +9285,7 @@ func rewriteValueARM_OpARMORshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -9446,9 +9392,7 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -11952,9 +11896,7 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + 
v.copyOf(x) return true } // match: (SUBconst [c] x) @@ -13253,9 +13195,7 @@ func rewriteValueARM_OpARMXORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [c] (MOVWconst [d])) @@ -15168,9 +15108,7 @@ func rewriteValueARM_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -16402,9 +16340,7 @@ func rewriteValueARM_OpSelect0(v *Value) bool { if v_0_1.Op != OpARMMOVWconst || v_0_1.AuxInt != 1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Select0 (CALLudiv x (MOVWconst [c]))) @@ -16637,9 +16573,7 @@ func rewriteValueARM_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index dd3a8b922b..a6b13497ac 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -1613,9 +1613,7 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVDconst [d])) @@ -1915,9 +1913,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND x (MVN y)) @@ -2022,9 +2018,7 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVDconst [d])) @@ -2179,9 +2173,7 @@ func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } 
return false @@ -2235,9 +2227,7 @@ func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -2291,9 +2281,7 @@ func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -3504,9 +3492,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { if !(ccARM64Eval(cc, flag) > 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CSEL {cc} _ y flag) @@ -3519,9 +3505,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { if !(ccARM64Eval(cc, flag) < 0) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (CSEL {cc} x y (CMPWconst [0] boolval)) @@ -3591,9 +3575,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { if !(ccARM64Eval(cc, flag) > 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (CSEL0 {cc} _ flag) @@ -4094,8 +4076,7 @@ func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool { sym := v_0.Aux b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -4116,8 +4097,7 @@ func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool { sym := v_0.Aux b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym return true @@ -5474,9 +5454,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADD a x (MOVDconst [1])) @@ -5652,9 +5630,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } - v.reset(OpCopy) 
- v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADD a (MOVDconst [1]) x) @@ -5880,9 +5856,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADDW a x (MOVDconst [c])) @@ -6073,9 +6047,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MADDW a (MOVDconst [c]) x) @@ -6284,9 +6256,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -6521,9 +6491,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if !(int32(c) == -1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13868,9 +13836,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUB a x (MOVDconst [1])) @@ -14046,9 +14012,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUB a (MOVDconst [1]) x) @@ -14274,9 +14238,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUBW a x (MOVDconst [c])) @@ -14467,9 +14429,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (MSUBW a (MOVDconst [c]) x) @@ -14720,9 +14680,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != 
OpARM64MOVDconst || v_1.AuxInt != 1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -14972,9 +14930,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if !(int32(c) == 1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -15506,9 +15462,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (OR x (MVN y)) @@ -16005,8 +15959,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 @@ -16097,8 +16050,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr0, idx0, mem) return true } @@ -16186,8 +16138,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr, idx, mem) return true } @@ -16359,8 +16310,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 @@ -16519,8 +16469,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr0, idx0, mem) return true } @@ -16692,8 +16641,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := 
b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr, idx, mem) return true } @@ -16781,8 +16729,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) v1.Aux = s v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) @@ -16875,8 +16822,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) @@ -16966,8 +16912,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3) v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) @@ -17141,8 +17086,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) @@ -17303,8 +17247,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) @@ -17478,8 +17421,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) @@ -17692,9 +17634,7 @@ func rewriteValueARM64_OpARM64ORconst(v 
*Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -17803,9 +17743,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: ( ORshiftLL [c] (SRLconst x [64-c]) x) @@ -17951,8 +17889,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 @@ -18004,8 +17941,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr0, idx0, mem) return true } @@ -18048,8 +17984,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr, idx, mem) return true } @@ -18108,8 +18043,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 @@ -18175,8 +18109,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr0, idx0, mem) return true } @@ -18236,8 +18169,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr, idx, mem) return true } @@ -18295,8 +18227,7 @@ func 
rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type) v1.AuxInt = 1 v1.AddArg(idx0) @@ -18400,8 +18331,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.Aux = s v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) v1.AuxInt = i0 @@ -18501,8 +18431,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr0, idx0, mem) return true } @@ -18596,8 +18525,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type) v1.AuxInt = 2 v1.AddArg(idx0) @@ -18700,8 +18628,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AddArg3(ptr, idx, mem) return true } @@ -18743,8 +18670,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) v1.AuxInt = i0 v1.Aux = s @@ -18796,8 +18722,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) @@ -18842,8 +18767,7 @@ func 
rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) @@ -18908,8 +18832,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) v1.Aux = s v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) @@ -18981,8 +18904,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x1.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) @@ -19048,8 +18970,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) @@ -19156,8 +19077,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) @@ -19263,8 +19183,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x3.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) v1.AddArg3(ptr0, idx0, mem) v0.AddArg(v1) @@ -19372,8 +19291,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, 
OpARM64MOVDloadidx, t) v1.AddArg3(ptr, idx, mem) v0.AddArg(v1) @@ -19430,9 +19348,7 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } return false @@ -19486,9 +19402,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { if x != y.Args[0] || !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: ( ORshiftRL [c] (SLLconst x [64-c]) x) @@ -20455,9 +20369,7 @@ func rewriteValueARM64_OpARM64SUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBconst [c] (MOVDconst [d])) @@ -21035,9 +20947,7 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (UDIV x (MOVDconst [c])) @@ -21089,9 +20999,7 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool { if !(uint32(c) == 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (UDIVW x (MOVDconst [c])) @@ -21666,9 +21574,7 @@ func rewriteValueARM64_OpARM64XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [-1] x) @@ -23648,9 +23554,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -25446,9 +25350,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 58dc71bd04..e036885a16 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -2094,9 +2094,7 @@ func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVWconst [d])) @@ -2168,9 +2166,7 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND (SGTUconst [1] x) (SGTUconst [1] y)) @@ -2215,9 +2211,7 @@ func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVWconst [d])) @@ -2259,9 +2253,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { if v_2.Op != OpMIPSMOVWconst || v_2.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = f.Type - v.AddArg(f) + v.copyOf(f) return true } // match: (CMOVZ a _ (MOVWconst [c])) @@ -2276,9 +2268,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { if !(c != 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } // match: (CMOVZ a (MOVWconst [0]) c) @@ -2320,9 +2310,7 @@ func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool { if !(c != 0) { break } - v.reset(OpCopy) - v.Type = a.Type - v.AddArg(a) + v.copyOf(a) return true } return false @@ -2484,8 +2472,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -2632,8 +2619,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -2936,9 +2922,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } 
- v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -3060,9 +3044,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -3257,8 +3239,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -3451,8 +3432,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -3721,9 +3701,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -3913,9 +3891,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -4053,9 +4029,7 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (OR (SGTUzero x) (SGTUzero y)) @@ -4089,9 +4063,7 @@ func rewriteValueMIPS_OpMIPSORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -4712,9 +4684,7 @@ func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBconst [c] (MOVWconst [d])) @@ -4800,9 +4770,7 @@ func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: 
(XORconst [-1] x) @@ -4977,9 +4945,7 @@ func rewriteValueMIPS_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -6699,9 +6665,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { continue } x := v_0_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -6962,9 +6926,7 @@ func rewriteValueMIPS_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 346fa6f28e..4413c535d6 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -2327,9 +2327,7 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDVconst [c] (MOVVconst [d])) @@ -2412,9 +2410,7 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -2438,9 +2434,7 @@ func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVVconst [d])) @@ -4290,9 +4284,7 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -4306,9 +4298,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -4867,9 +4857,7 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } // match: (SUBVconst [c] (MOVVconst [d])) @@ -4967,9 +4955,7 @@ func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [-1] x) @@ -5190,9 +5176,7 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -7104,9 +7088,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -7149,9 +7131,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Select1 (DIVVU x (MOVVconst [c]))) @@ -7369,9 +7349,7 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index c7b4f44920..279fed4edb 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -3424,9 +3424,7 @@ func rewriteValuePPC64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -3999,9 +3997,7 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVDaddr [d] {sym} x)) @@ -4094,9 +4090,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) { continue } - 
v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -4164,9 +4158,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [0] _) @@ -4188,9 +4180,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [0xFF] y:(MOVBreg _)) @@ -4203,9 +4193,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [c] y:(MOVHZreg _)) @@ -4217,9 +4205,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [0xFFFF] y:(MOVHreg _)) @@ -4232,9 +4218,7 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { if y.Op != OpPPC64MOVHreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ANDconst [c] (MOVBreg x)) @@ -5464,9 +5448,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [2] _ y (FlagLT)) @@ -5479,9 +5461,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [2] _ y (FlagGT)) @@ -5494,9 +5474,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [6] _ y (FlagEQ)) @@ -5509,9 +5487,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) 
bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [6] x _ (FlagLT)) @@ -5524,9 +5500,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [6] x _ (FlagGT)) @@ -5539,9 +5513,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [0] _ y (FlagEQ)) @@ -5554,9 +5526,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [0] _ y (FlagGT)) @@ -5569,9 +5539,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [0] x _ (FlagLT)) @@ -5584,9 +5552,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [5] _ x (FlagEQ)) @@ -5599,9 +5565,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [5] _ x (FlagLT)) @@ -5614,9 +5578,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [5] y _ (FlagGT)) @@ -5629,9 +5591,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [1] _ y (FlagEQ)) @@ -5644,9 +5604,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v 
*Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [1] _ y (FlagLT)) @@ -5659,9 +5617,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [1] x _ (FlagGT)) @@ -5674,9 +5630,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [4] x _ (FlagEQ)) @@ -5689,9 +5643,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [4] x _ (FlagGT)) @@ -5704,9 +5656,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagGT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ISEL [4] _ y (FlagLT)) @@ -5719,9 +5669,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if v_2.Op != OpPPC64FlagLT { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (ISEL [n] x y (InvertFlags bool)) @@ -6173,8 +6121,7 @@ func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -6311,9 +6258,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if !(uint64(c) <= 0xFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBZreg (SRWconst [c] (MOVBZreg x))) @@ -6393,9 +6338,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if y.Op != OpPPC64MOVBZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBZreg (MOVBreg x)) @@ 
-6417,9 +6360,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) @@ -6430,9 +6371,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(Arg )) @@ -6447,9 +6386,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if !(is8BitInt(t) && !isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg (MOVDconst [c])) @@ -6481,9 +6418,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { if !(uint64(c) <= 0x7F) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBreg (SRAWconst [c] (MOVBreg x))) @@ -6597,9 +6532,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVBreg (MOVBZreg x)) @@ -6625,9 +6558,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { if !(is8BitInt(t) && isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg (MOVDconst [c])) @@ -8234,9 +8165,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if !(uint64(c) <= 0xFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg (SRWconst [c] (MOVBZreg x))) @@ -8335,9 +8264,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if y.Op != OpPPC64MOVHZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg y:(MOVBZreg _)) @@ -8347,9 +8274,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if y.Op != OpPPC64MOVBZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + 
v.copyOf(y) return true } // match: (MOVHZreg y:(MOVHBRload _ _)) @@ -8360,9 +8285,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { break } _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHZreg y:(MOVHreg x)) @@ -8385,9 +8308,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) @@ -8398,9 +8319,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZload _ _)) @@ -8411,9 +8330,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) @@ -8424,9 +8341,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(Arg )) @@ -8441,9 +8356,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg (MOVDconst [c])) @@ -8588,9 +8501,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if !(uint64(c) <= 0x7FFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHreg (SRAWconst [c] (MOVBreg x))) @@ -8723,9 +8634,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if y.Op != OpPPC64MOVHreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHreg y:(MOVBreg _)) @@ -8735,9 +8644,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) 
- v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVHreg y:(MOVHZreg x)) @@ -8760,9 +8667,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHloadidx _ _ _)) @@ -8773,9 +8678,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(Arg )) @@ -8790,9 +8693,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg (MOVDconst [c])) @@ -9356,9 +9257,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if !(uint64(c) <= 0xFFFFFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(AND (MOVDconst [c]) _)) @@ -9380,9 +9279,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if !(uint64(c) <= 0xFFFFFFFF) { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -9485,9 +9382,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVWZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVHZreg _)) @@ -9497,9 +9392,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVHZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVBZreg _)) @@ -9509,9 +9402,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVBZreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVHBRload _ _)) @@ -9522,9 +9413,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } 
_ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVWBRload _ _)) @@ -9535,9 +9424,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWZreg y:(MOVWreg x)) @@ -9560,9 +9447,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) @@ -9573,9 +9458,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZload _ _)) @@ -9586,9 +9469,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) @@ -9599,9 +9480,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZload _ _)) @@ -9612,9 +9491,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) @@ -9625,9 +9502,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(Arg )) @@ -9642,9 +9517,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg (MOVDconst [c])) @@ -9789,9 +9662,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) 
bool { if !(uint64(c) <= 0xFFFF) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(AND (MOVDconst [c]) _)) @@ -9813,9 +9684,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if !(uint64(c) <= 0x7FFFFFFF) { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -9935,9 +9804,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64MOVWreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(MOVHreg _)) @@ -9947,9 +9814,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64MOVHreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(MOVBreg _)) @@ -9959,9 +9824,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } // match: (MOVWreg y:(MOVWZreg x)) @@ -9984,9 +9847,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHloadidx _ _ _)) @@ -9997,9 +9858,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWload _ _)) @@ -10010,9 +9869,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWloadidx _ _ _)) @@ -10023,9 +9880,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { break } _ = x.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(Arg )) @@ -10040,9 +9895,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if 
!((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg (MOVDconst [c])) @@ -10343,8 +10196,7 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = off v0.Aux = sym v0.AddArg2(ptr, mem) @@ -10637,8 +10489,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -10678,8 +10529,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -10719,8 +10569,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -10762,8 +10611,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -10811,8 +10659,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = n1 v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) @@ -10863,8 +10710,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) 
v0.AuxInt = n1 v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) @@ -10933,8 +10779,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -10999,8 +10844,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -11066,8 +10910,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11135,8 +10978,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11204,8 +11046,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11273,8 +11114,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11346,8 +11186,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 32 v1 := b.NewValue0(x0.Pos, 
OpPPC64MOVWBRload, t) v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) @@ -11422,8 +11261,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = 32 v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) @@ -11541,8 +11379,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x4, x5, x6, x7) v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -11658,8 +11495,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x0, x1, x2, x3, x4) v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11777,8 +11613,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x3, x4, x5, x6, x7) v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11896,8 +11731,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } b = mergePoint(b, x3, x4, x5, x6, x7) v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s @@ -11922,9 +11756,7 @@ func rewriteValuePPC64_OpPPC64ORN(v *Value) bool { if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != -1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -11962,9 +11794,7 @@ func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -12223,9 +12053,7 @@ func 
rewriteValuePPC64_OpPPC64XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -15075,9 +14903,7 @@ func rewriteValuePPC64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index e40fe69930..f20f744456 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -1864,9 +1864,7 @@ func rewriteValueRISCV64_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -2289,9 +2287,7 @@ func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -4081,9 +4077,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 7a78dfdac7..874bac1fde 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -3097,9 +3097,7 @@ func rewriteValueS390X_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -5450,9 +5448,7 @@ func rewriteValueS390X_OpS390XADDWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDWconst [c] (MOVDconst [d])) @@ -5604,9 +5600,7 @@ func rewriteValueS390X_OpS390XADDconst(v *Value) bool { break } x := v_0 - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ADDconst [c] (MOVDconst [d])) @@ -5849,9 +5843,7 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) @@ -5909,9 +5901,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) @@ -6029,9 +6019,7 @@ func rewriteValueS390X_OpS390XANDWconst(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDWconst [c] (MOVDconst [d])) @@ -6132,9 +6120,7 @@ func rewriteValueS390X_OpS390XANDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ANDconst [c] (MOVDconst [d])) @@ -7285,9 +7271,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { if !(isSamePtr(ptr1, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -7610,9 +7594,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { if !(isSamePtr(ptr1, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -8094,8 +8076,7 @@ func rewriteValueS390X_OpS390XLDGR(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x.Pos, OpS390XLDGR, t) v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1) v2.AuxInt = off @@ -8112,9 +8093,7 @@ func rewriteValueS390X_OpS390XLDGR(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ 
-8162,9 +8141,7 @@ func rewriteValueS390X_OpS390XLGDR(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8197,9 +8174,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagEQ || !(c.(s390x.CCMask)&s390x.Equal != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} _ x (FlagLT)) @@ -8211,9 +8186,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagLT || !(c.(s390x.CCMask)&s390x.Less != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} _ x (FlagGT)) @@ -8225,9 +8198,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagGT || !(c.(s390x.CCMask)&s390x.Greater != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} _ x (FlagOV)) @@ -8239,9 +8210,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagOV || !(c.(s390x.CCMask)&s390x.Unordered != 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagEQ)) @@ -8253,9 +8222,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagEQ || !(c.(s390x.CCMask)&s390x.Equal == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagLT)) @@ -8267,9 +8234,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagLT || !(c.(s390x.CCMask)&s390x.Less == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (LOCGR {c} x _ (FlagGT)) @@ -8281,9 +8246,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagGT || !(c.(s390x.CCMask)&s390x.Greater == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) 
return true } // match: (LOCGR {c} x _ (FlagOV)) @@ -8295,9 +8258,7 @@ func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { if v_2.Op != OpS390XFlagOV || !(c.(s390x.CCMask)&s390x.Unordered == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8311,9 +8272,7 @@ func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool { if x.Op != OpS390XFMOVSconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8327,9 +8286,7 @@ func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool { if x.Op != OpS390XFMOVDconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -8620,9 +8577,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) @@ -8637,9 +8592,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg x:(MOVBload [o] {s} p mem)) @@ -8660,8 +8613,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg2(p, mem) @@ -8686,8 +8638,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg3(p, i, mem) @@ -8705,9 +8656,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if !(!t.IsSigned() && t.Size() == 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg (MOVDconst [c])) @@ -8743,9 +8692,7 @@ func 
rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if !(int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBZreg (ANDWconst [m] x)) @@ -9051,9 +8998,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg x:(MOVBloadidx _ _ _)) @@ -9068,9 +9013,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg x:(MOVBZload [o] {s} p mem)) @@ -9091,8 +9034,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg2(p, mem) @@ -9117,8 +9059,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVBloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg3(p, i, mem) @@ -9136,9 +9077,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { if !(t.IsSigned() && t.Size() == 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVBreg (MOVDconst [c])) @@ -10085,9 +10024,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { if !(isSamePtr(ptr1, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) @@ -11111,9 +11048,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg 
x:(MOVBZloadidx _ _ _)) @@ -11128,9 +11063,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZload _ _)) @@ -11145,9 +11078,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) @@ -11162,9 +11093,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg x:(MOVHload [o] {s} p mem)) @@ -11185,8 +11114,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg2(p, mem) @@ -11211,8 +11139,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg3(p, i, mem) @@ -11230,9 +11157,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { if !(!t.IsSigned() && t.Size() <= 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHZreg (MOVDconst [c])) @@ -11534,9 +11459,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVBloadidx _ _ _)) @@ -11551,9 +11474,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg 
x:(MOVHload _ _)) @@ -11568,9 +11489,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHloadidx _ _ _)) @@ -11585,9 +11504,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVBZload _ _)) @@ -11602,9 +11519,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVBZloadidx _ _ _)) @@ -11619,9 +11534,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg x:(MOVHZload [o] {s} p mem)) @@ -11642,8 +11555,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg2(p, mem) @@ -11668,8 +11580,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg3(p, i, mem) @@ -11687,9 +11598,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { if !(t.IsSigned() && t.Size() <= 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVHreg (MOVDconst [c])) @@ -12672,9 +12581,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVBZloadidx _ _ 
_)) @@ -12689,9 +12596,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZload _ _)) @@ -12706,9 +12611,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) @@ -12723,9 +12626,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZload _ _)) @@ -12740,9 +12641,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 4) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) @@ -12757,9 +12656,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 4) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg x:(MOVWload [o] {s} p mem)) @@ -12780,8 +12677,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg2(p, mem) @@ -12806,8 +12702,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg3(p, i, mem) @@ -12825,9 +12720,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { if !(!t.IsSigned() && t.Size() <= 4) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWZreg (MOVDconst [c])) 
@@ -13097,9 +12990,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVBloadidx _ _ _)) @@ -13114,9 +13005,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHload _ _)) @@ -13131,9 +13020,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHloadidx _ _ _)) @@ -13148,9 +13035,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWload _ _)) @@ -13165,9 +13050,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWloadidx _ _ _)) @@ -13182,9 +13065,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(x.Type.IsSigned() || x.Type.Size() == 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVBZload _ _)) @@ -13199,9 +13080,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVBZloadidx _ _ _)) @@ -13216,9 +13095,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: 
(MOVWreg x:(MOVHZload _ _)) @@ -13233,9 +13110,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVHZloadidx _ _ _)) @@ -13250,9 +13125,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg x:(MOVWZload [o] {s} p mem)) @@ -13273,8 +13146,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg2(p, mem) @@ -13299,8 +13171,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpS390XMOVWloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = o v0.Aux = s v0.AddArg3(p, i, mem) @@ -13318,9 +13189,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { if !(t.IsSigned() && t.Size() <= 4) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MOVWreg (MOVDconst [c])) @@ -13924,9 +13793,7 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLDconst [c] x) @@ -14171,9 +14038,7 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (MULLWconst [c] x) @@ -14591,9 +14456,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) @@ -14654,8 +14517,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := 
b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -14694,8 +14556,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -14734,8 +14595,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -14792,8 +14652,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) @@ -14856,8 +14715,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) @@ -14909,8 +14767,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -14958,8 +14815,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -15007,8 +14863,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + 
v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -15074,8 +14929,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) @@ -15147,8 +15001,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) @@ -15195,8 +15048,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s @@ -15245,8 +15097,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s @@ -15295,8 +15146,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -15353,8 +15203,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) @@ -15427,8 +15276,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) 
- v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) @@ -15482,8 +15330,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v1.AuxInt = i0 v1.Aux = s @@ -15541,8 +15388,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) v1.AuxInt = i0 v1.Aux = s @@ -15600,8 +15446,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -15667,8 +15512,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) @@ -15750,8 +15594,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) @@ -15823,9 +15666,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) @@ -15913,8 +15754,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, 
OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -15953,8 +15793,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -16011,8 +15850,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) @@ -16064,8 +15902,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -16113,8 +15950,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -16180,8 +16016,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) @@ -16228,8 +16063,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s @@ -16278,8 +16112,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - 
v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg2(p, mem) @@ -16336,8 +16169,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) @@ -16391,8 +16223,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) v1.AuxInt = i0 v1.Aux = s @@ -16450,8 +16281,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v0.AuxInt = i0 v0.Aux = s v0.AddArg3(p, idx, mem) @@ -16517,8 +16347,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } b = mergePoint(b, x0, x1, y) v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) @@ -16549,9 +16378,7 @@ func rewriteValueS390X_OpS390XORWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORWconst [c] _) @@ -16640,9 +16467,7 @@ func rewriteValueS390X_OpS390XORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ORconst [-1] _) @@ -17984,9 +17809,7 @@ func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBWconst [c] x) @@ -18060,9 +17883,7 @@ func rewriteValueS390X_OpS390XSUBconst(v *Value) bool { break } 
x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SUBconst [c] x) @@ -18465,9 +18286,7 @@ func rewriteValueS390X_OpS390XXORWconst(v *Value) bool { if !(int32(c) == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORWconst [c] (MOVDconst [d])) @@ -18544,9 +18363,7 @@ func rewriteValueS390X_OpS390XXORconst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (XORconst [c] (MOVDconst [d])) @@ -19064,9 +18881,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index 81acd7e7c4..4b100b6c32 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -1853,9 +1853,7 @@ func rewriteValueWasm_OpMove(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Move [1] dst src mem) @@ -3099,9 +3097,7 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt16to32 x) @@ -3143,9 +3139,7 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt16to64 x) @@ -3187,9 +3181,7 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt32to64 x) @@ -3231,9 +3223,7 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true 
} // match: (SignExt8to16 x) @@ -3275,9 +3265,7 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt8to32 x) @@ -3319,9 +3307,7 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (SignExt8to64 x) @@ -3596,9 +3582,7 @@ func rewriteValueWasm_OpWasmI64AddConst(v *Value) bool { break } x := v_0 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (I64AddConst [off] (LoweredAddr {sym} [off2] base)) @@ -4350,9 +4334,7 @@ func rewriteValueWasm_OpZero(v *Value) bool { break } mem := v_1 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero [1] destptr mem) @@ -4610,9 +4592,7 @@ func rewriteValueWasm_OpZeroExt16to32(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt16to32 x) @@ -4638,9 +4618,7 @@ func rewriteValueWasm_OpZeroExt16to64(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt16to64 x) @@ -4666,9 +4644,7 @@ func rewriteValueWasm_OpZeroExt32to64(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt32to64 x) @@ -4694,9 +4670,7 @@ func rewriteValueWasm_OpZeroExt8to16(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt8to16 x) @@ -4722,9 +4696,7 @@ func rewriteValueWasm_OpZeroExt8to32(v *Value) bool { break } _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt8to32 x) @@ -4750,9 +4722,7 @@ func rewriteValueWasm_OpZeroExt8to64(v *Value) bool { break } _ = x.Args[1] - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ZeroExt8to64 x) diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index a031fca4ad..08ed1fd129 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -41,9 +41,7 @@ func rewriteValuedec_OpComplexImag(v *Value) bool { break } imag := v_0.Args[1] - v.reset(OpCopy) - v.Type = imag.Type - v.AddArg(imag) + v.copyOf(imag) return true } return false @@ -58,9 +56,7 @@ func rewriteValuedec_OpComplexReal(v *Value) bool { } _ = v_0.Args[1] real := v_0.Args[0] - v.reset(OpCopy) - v.Type = real.Type - v.AddArg(real) + v.copyOf(real) return true } return false @@ -74,9 +70,7 @@ func rewriteValuedec_OpIData(v *Value) bool { break } data := v_0.Args[1] - v.reset(OpCopy) - v.Type = data.Type - v.AddArg(data) + v.copyOf(data) return true } return false @@ -91,9 +85,7 @@ func rewriteValuedec_OpITab(v *Value) bool { } _ = v_0.Args[1] itab := v_0.Args[0] - v.reset(OpCopy) - v.Type = itab.Type - v.AddArg(itab) + v.copyOf(itab) return true } return false @@ -225,9 +217,7 @@ func rewriteValuedec_OpSliceCap(v *Value) bool { break } cap := v_0.Args[2] - v.reset(OpCopy) - v.Type = cap.Type - v.AddArg(cap) + v.copyOf(cap) return true } return false @@ -242,9 +232,7 @@ func rewriteValuedec_OpSliceLen(v *Value) bool { } _ = v_0.Args[2] len := v_0.Args[1] - v.reset(OpCopy) - v.Type = len.Type - v.AddArg(len) + v.copyOf(len) return true } return false @@ -259,9 +247,7 @@ func rewriteValuedec_OpSlicePtr(v *Value) bool { } _ = v_0.Args[2] ptr := v_0.Args[0] - v.reset(OpCopy) - v.Type = ptr.Type - v.AddArg(ptr) + v.copyOf(ptr) return true } return false @@ -406,9 +392,7 @@ func rewriteValuedec_OpStringLen(v *Value) bool { break } len := v_0.Args[1] - v.reset(OpCopy) - v.Type = len.Type - v.AddArg(len) + v.copyOf(len) return true } return false @@ -423,9 +407,7 @@ func rewriteValuedec_OpStringPtr(v *Value) bool { 
} _ = v_0.Args[1] ptr := v_0.Args[0] - v.reset(OpCopy) - v.Type = ptr.Type - v.AddArg(ptr) + v.copyOf(ptr) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 3beaf8e99f..08a045ccac 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -441,9 +441,7 @@ func rewriteValuedec64_OpInt64Hi(v *Value) bool { } _ = v_0.Args[1] hi := v_0.Args[0] - v.reset(OpCopy) - v.Type = hi.Type - v.AddArg(hi) + v.copyOf(hi) return true } return false @@ -457,9 +455,7 @@ func rewriteValuedec64_OpInt64Lo(v *Value) bool { break } lo := v_0.Args[1] - v.reset(OpCopy) - v.Type = lo.Type - v.AddArg(lo) + v.copyOf(lo) return true } return false @@ -2174,9 +2170,7 @@ func rewriteValuedec64_OpTrunc64to32(v *Value) bool { break } lo := v_0.Args[1] - v.reset(OpCopy) - v.Type = lo.Type - v.AddArg(lo) + v.copyOf(lo) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 5a9dd7ed5b..0089df46b9 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -508,9 +508,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -763,9 +761,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1041,9 +1037,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1319,9 +1313,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1627,9 +1619,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { if x != v_1 { 
break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And16 (Const16 [-1]) x) @@ -1640,9 +1630,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -1829,9 +1817,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And32 (Const32 [-1]) x) @@ -1842,9 +1828,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -2031,9 +2015,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And64 (Const64 [-1]) x) @@ -2044,9 +2026,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -2233,9 +2213,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (And8 (Const8 [-1]) x) @@ -2246,9 +2224,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -2361,9 +2337,7 @@ func rewriteValuegeneric_OpArraySelect(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (ArraySelect [0] (IData x)) @@ -2388,9 +2362,7 @@ func rewriteValuegeneric_OpCom16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com16 (Const16 [c])) @@ -2435,9 +2407,7 @@ func rewriteValuegeneric_OpCom32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com32 (Const32 [c])) @@ -2482,9 +2452,7 @@ func rewriteValuegeneric_OpCom64(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com64 (Const64 [c])) @@ -2529,9 +2497,7 @@ func rewriteValuegeneric_OpCom8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Com8 (Const8 [c])) @@ -2755,9 +2721,7 @@ func rewriteValuegeneric_OpConvert(v *Value) bool { if mem != v_1 { break } - v.reset(OpCopy) - v.Type = ptr.Type - v.AddArg(ptr) + v.copyOf(ptr) return true } return false @@ -6803,9 +6767,7 @@ func rewriteValuegeneric_OpEqB(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -8894,9 +8856,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 x _))) @@ -8922,9 +8882,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p3, sizeof(t3), p2, sizeof(t2))) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _)))) @@ -8957,9 +8915,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p4, sizeof(t4), p2, sizeof(t2)) && disjoint(p4, sizeof(t4), p3, sizeof(t3))) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _))))) @@ -8999,9 +8955,7 @@ func 
rewriteValuegeneric_OpLoad(v *Value) bool { if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p5, sizeof(t5), p2, sizeof(t2)) && disjoint(p5, sizeof(t5), p3, sizeof(t3)) && disjoint(p5, sizeof(t5), p4, sizeof(t4))) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Load p1 (Store {t2} p2 (Const64 [x]) _)) @@ -9129,8 +9083,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p3) @@ -9173,8 +9126,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p4) @@ -9224,8 +9176,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p5) @@ -9282,8 +9233,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { } b = mem.Block v0 := b.NewValue0(v.Pos, OpLoad, t1) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) v1.AuxInt = o1 v1.AddArg(p6) @@ -9686,9 +9636,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh16x64 (Const16 [0]) _) @@ -9903,9 +9851,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh32x64 (Const32 [0]) _) @@ -10120,9 +10066,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh64x64 (Const64 [0]) _) @@ -10337,9 +10281,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Lsh8x64 (Const8 [0]) _) @@ -10831,9 +10773,7 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 || !(isNonNegative(n)) { break } - v.reset(OpCopy) - v.Type = n.Type - v.AddArg(n) + v.copyOf(n) return true } // match: (Mod64 n (Const64 [c])) @@ -12451,9 +12391,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { if !(isSamePtr(dst, src)) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } return false @@ -12489,9 +12427,7 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -12633,9 +12569,7 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -12809,9 +12743,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom64F(1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -12877,9 +12809,7 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13053,9 +12983,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(1) { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -13121,9 +13049,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ 
-13267,9 +13193,7 @@ func rewriteValuegeneric_OpNeg16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg16 (Com16 x)) @@ -13321,9 +13245,7 @@ func rewriteValuegeneric_OpNeg32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg32 (Com32 x)) @@ -13394,9 +13316,7 @@ func rewriteValuegeneric_OpNeg64(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg64 (Com64 x)) @@ -13467,9 +13387,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Neg8 (Com8 x)) @@ -14311,9 +14229,7 @@ func rewriteValuegeneric_OpNeqB(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -14792,9 +14708,7 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { if mem != v_1 { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) @@ -15285,9 +15199,7 @@ func rewriteValuegeneric_OpOffPtr(v *Value) bool { if !(v.Type.Compare(p.Type) == types.CMPeq) { break } - v.reset(OpCopy) - v.Type = p.Type - v.AddArg(p) + v.copyOf(p) return true } return false @@ -15321,9 +15233,7 @@ func rewriteValuegeneric_OpOr16(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Or16 (Const16 [0]) x) @@ -15334,9 +15244,7 @@ func rewriteValuegeneric_OpOr16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -15503,9 +15411,7 @@ func rewriteValuegeneric_OpOr32(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + 
v.copyOf(x) return true } // match: (Or32 (Const32 [0]) x) @@ -15516,9 +15422,7 @@ func rewriteValuegeneric_OpOr32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -15685,9 +15589,7 @@ func rewriteValuegeneric_OpOr64(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Or64 (Const64 [0]) x) @@ -15698,9 +15600,7 @@ func rewriteValuegeneric_OpOr64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -15867,9 +15767,7 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { if x != v_1 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Or8 (Const8 [0]) x) @@ -15880,9 +15778,7 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -16150,9 +16046,7 @@ func rewriteValuegeneric_OpRotateLeft16(v *Value) bool { if !(c%16 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16172,9 +16066,7 @@ func rewriteValuegeneric_OpRotateLeft32(v *Value) bool { if !(c%32 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16194,9 +16086,7 @@ func rewriteValuegeneric_OpRotateLeft64(v *Value) bool { if !(c%64 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16216,9 +16106,7 @@ func rewriteValuegeneric_OpRotateLeft8(v *Value) bool { if !(c%8 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16232,9 +16120,7 @@ func rewriteValuegeneric_OpRound32F(v *Value) bool { if x.Op != OpConst32F { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16248,9 
+16134,7 @@ func rewriteValuegeneric_OpRound64F(v *Value) bool { if x.Op != OpConst64F { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -16344,9 +16228,7 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh16Ux64 (Const16 [0]) _) @@ -16600,9 +16482,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh16x64 (Const16 [0]) _) @@ -16783,9 +16663,7 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh32Ux64 (Const32 [0]) _) @@ -17057,9 +16935,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh32x64 (Const32 [0]) _) @@ -17258,9 +17134,7 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh64Ux64 (Const64 [0]) _) @@ -17550,9 +17424,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh64x64 (Const64 [0]) _) @@ -17769,9 +17641,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh8Ux64 (Const8 [0]) _) @@ -18006,9 +17876,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { if v_1.Op != OpConst64 
|| v_1.AuxInt != 0 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Rsh8x64 (Const8 [0]) _) @@ -18155,9 +18023,7 @@ func rewriteValuegeneric_OpSignExt16to32(v *Value) bool { if !(s >= 16) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18195,9 +18061,7 @@ func rewriteValuegeneric_OpSignExt16to64(v *Value) bool { if !(s >= 48) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18235,9 +18099,7 @@ func rewriteValuegeneric_OpSignExt32to64(v *Value) bool { if !(s >= 32) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18275,9 +18137,7 @@ func rewriteValuegeneric_OpSignExt8to16(v *Value) bool { if !(s >= 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18315,9 +18175,7 @@ func rewriteValuegeneric_OpSignExt8to32(v *Value) bool { if !(s >= 24) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18355,9 +18213,7 @@ func rewriteValuegeneric_OpSignExt8to64(v *Value) bool { if !(s >= 56) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18666,9 +18522,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { if !(needRaceCleanup(sym, v)) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -18695,9 +18549,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1)) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ oldmem)) @@ -18722,9 +18574,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if oldmem != mem.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, 
sizeof(t1), p3, sizeof(t3))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem))) @@ -18756,9 +18606,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if oldmem != mem_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem)))) @@ -18797,9 +18645,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if oldmem != mem_2_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4)) && disjoint(p1, sizeof(t1), p5, sizeof(t5))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _)) @@ -18823,9 +18669,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if !(isConstZero(x) && o >= 0 && sizeof(t)+o <= n && isSamePtr(p1, p2)) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _))) @@ -18857,9 +18701,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p3) && disjoint(op, sizeof(t1), p2, sizeof(t2))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _)))) @@ -18898,9 +18740,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p4) && disjoint(op, sizeof(t1), p2, sizeof(t2)) 
&& disjoint(op, sizeof(t1), p3, sizeof(t3))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _))))) @@ -18946,9 +18786,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p5) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3)) && disjoint(op, sizeof(t1), p4, sizeof(t4))) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store _ (StructMake0) mem) @@ -18958,9 +18796,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store dst (StructMake1 f0) mem) @@ -19131,9 +18967,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } mem := v_2 - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store dst (ArrayMake1 e) mem) @@ -19171,9 +19005,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem) @@ -19201,9 +19033,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem))) @@ -19613,9 +19443,7 @@ func rewriteValuegeneric_OpStructSelect(v 
*Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [0] (StructMake2 x _)) @@ -19626,9 +19454,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } _ = v_0.Args[1] x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [1] (StructMake2 _ x)) @@ -19638,9 +19464,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [0] (StructMake3 x _ _)) @@ -19651,9 +19475,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } _ = v_0.Args[2] x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [1] (StructMake3 _ x _)) @@ -19664,9 +19486,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } _ = v_0.Args[2] x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [2] (StructMake3 _ _ x)) @@ -19676,9 +19496,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[2] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [0] (StructMake4 x _ _ _)) @@ -19689,9 +19507,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } _ = v_0.Args[3] x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [1] (StructMake4 _ x _ _)) @@ -19702,9 +19518,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } _ = v_0.Args[3] x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [2] (StructMake4 _ _ x _)) @@ -19715,9 +19529,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } _ = v_0.Args[3] x := v_0.Args[2] - v.reset(OpCopy) - v.Type = 
x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [3] (StructMake4 _ _ _ x)) @@ -19727,9 +19539,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { break } x := v_0.Args[3] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (StructSelect [i] x:(Load ptr mem)) @@ -19749,8 +19559,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { } b = x.Block v0 := b.NewValue0(v.Pos, OpLoad, v.Type) - v.reset(OpCopy) - v.AddArg(v0) + v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo()) v1.AuxInt = t.FieldOff(int(i)) v1.AddArg(ptr) @@ -19867,9 +19676,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -19889,9 +19696,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -20089,9 +19894,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -20111,9 +19914,7 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -20331,9 +20132,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -20353,9 +20152,7 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { if y != v_1 { continue } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -20573,9 +20370,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { if x != v_1 { continue } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } break @@ -20595,9 +20390,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { if y != v_1 { continue } - 
v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -20718,9 +20511,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc16to8 (SignExt8to16 x)) @@ -20730,9 +20521,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc16to8 (And16 (Const16 [y]) x)) @@ -20793,9 +20582,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to16 (SignExt8to32 x)) @@ -20816,9 +20603,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to16 (And32 (Const32 [y]) x)) @@ -20868,9 +20653,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to8 (SignExt8to32 x)) @@ -20880,9 +20663,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc32to8 (And32 (Const32 [y]) x)) @@ -20943,9 +20724,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to16 (SignExt8to64 x)) @@ -20966,9 +20745,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to16 (And64 (Const64 [y]) x)) @@ -21040,9 +20817,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { break } x := v_0.Args[0] 
- v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to32 (SignExt8to64 x)) @@ -21074,9 +20849,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to32 (And64 (Const64 [y]) x)) @@ -21126,9 +20899,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to8 (SignExt8to64 x)) @@ -21138,9 +20909,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } // match: (Trunc64to8 (And64 (Const64 [y]) x)) @@ -21211,9 +20980,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -21234,9 +21001,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -21346,9 +21111,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -21369,9 +21132,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -21481,9 +21242,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } break @@ -21504,9 +21263,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -21616,9 +21373,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { continue } x := v_1 - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.copyOf(x) return true } break @@ -21639,9 +21394,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { continue } y := v_1_1 - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.copyOf(y) return true } } @@ -21732,9 +21485,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + v.copyOf(mem) return true } // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) @@ -21852,9 +21603,7 @@ func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool { if !(s >= 16) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -21892,9 +21641,7 @@ func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool { if !(s >= 48) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -21932,9 +21679,7 @@ func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool { if !(s >= 32) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -21972,9 +21717,7 @@ func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool { if !(s >= 8) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22012,9 +21755,7 @@ func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool { if !(s >= 24) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false @@ -22052,9 +21793,7 @@ func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool { if !(s >= 56) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.copyOf(x) return true } return false diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 8c5834d530..e5246779fc 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ 
b/src/cmd/compile/internal/ssa/value.go @@ -310,17 +310,29 @@ func (v *Value) resetArgs() { v.Args = v.argstorage[:0] } +// reset is called from most rewrite rules. +// Allowing it to be inlined increases the size +// of cmd/compile by almost 10%, and slows it down. +//go:noinline func (v *Value) reset(op Op) { v.Op = op - if op != OpCopy && notStmtBoundary(op) { - // Special case for OpCopy because of how it is used in rewrite - v.Pos = v.Pos.WithNotStmt() - } v.resetArgs() v.AuxInt = 0 v.Aux = nil } +// copyOf is called from rewrite rules. +// It modifies v to be (Copy a). +//go:noinline +func (v *Value) copyOf(a *Value) { + v.Op = OpCopy + v.resetArgs() + v.AddArg(a) + v.AuxInt = 0 + v.Aux = nil + v.Type = a.Type +} + // copyInto makes a new value identical to v and adds it to the end of b. // unlike copyIntoWithXPos this does not check for v.Pos being a statement. func (v *Value) copyInto(b *Block) *Value { diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index d246fb333c..cebfbb8c9d 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -347,6 +347,7 @@ func writebarrier(f *Func) { bEnd.Values = append(bEnd.Values, last) last.Block = bEnd last.reset(OpPhi) + last.Pos = last.Pos.WithNotStmt() last.Type = types.TypeMem last.AddArg(memThen) last.AddArg(memElse) From bef0b4ea8fd0dbda6f29412a841c176d2fc2f2eb Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sun, 23 Feb 2020 19:23:15 -0800 Subject: [PATCH 26/69] hash/maphash: add more tests for seed generation Test all the paths by which a Hash picks its seed. Make sure they all behave identically to a preset seed. 
Change-Id: I2f7950857697f2f07226b96655574c36931b2aae Reviewed-on: https://go-review.googlesource.com/c/go/+/220686 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Vladimir Evgrafov Reviewed-by: Alan Donovan --- src/hash/maphash/maphash_test.go | 56 ++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/src/hash/maphash/maphash_test.go b/src/hash/maphash/maphash_test.go index 0164a9e20a..caea43a8c8 100644 --- a/src/hash/maphash/maphash_test.go +++ b/src/hash/maphash/maphash_test.go @@ -106,6 +106,62 @@ func TestRepeat(t *testing.T) { } } +func TestSeedFromSum64(t *testing.T) { + h1 := new(Hash) + h1.WriteString("foo") + x := h1.Sum64() // seed generated here + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.WriteString("foo") + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + +func TestSeedFromSeed(t *testing.T) { + h1 := new(Hash) + h1.WriteString("foo") + _ = h1.Seed() // seed generated here + x := h1.Sum64() + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.WriteString("foo") + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + +func TestSeedFromFlush(t *testing.T) { + b := make([]byte, 65) + h1 := new(Hash) + h1.Write(b) // seed generated here + x := h1.Sum64() + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.Write(b) + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + +func TestSeedFromReset(t *testing.T) { + h1 := new(Hash) + h1.WriteString("foo") + h1.Reset() // seed generated here + h1.WriteString("foo") + x := h1.Sum64() + h2 := new(Hash) + h2.SetSeed(h1.Seed()) + h2.WriteString("foo") + y := h2.Sum64() + if x != y { + t.Errorf("hashes don't match: want %x, got %x", x, y) + } +} + // Make sure a Hash implements the hash.Hash and hash.Hash64 interfaces. 
var _ hash.Hash = &Hash{} var _ hash.Hash64 = &Hash{} From d8bf079751cbdbebe8425be7c93b963dc0f31360 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 28 Feb 2020 15:42:03 -0800 Subject: [PATCH 27/69] runtime: use quiet NaNs in softfloat implementation Update #37455 Change-Id: Ieac0823aa398d73187c009037be15ba34c84f3d9 Reviewed-on: https://go-review.googlesource.com/c/go/+/221433 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/runtime/softfloat64.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/runtime/softfloat64.go b/src/runtime/softfloat64.go index 8fde0feddc..13bee6c1d7 100644 --- a/src/runtime/softfloat64.go +++ b/src/runtime/softfloat64.go @@ -13,7 +13,7 @@ const ( expbits64 uint = 11 bias64 = -1<<(expbits64-1) + 1 - nan64 uint64 = (1< Date: Sun, 1 Mar 2020 13:09:09 -0800 Subject: [PATCH 28/69] cmd/compile: add streamlined Block Reset+AddControl routines For use in rewrite rules. Shrinks cmd/compile: compile 20082104 19967416 -114688 -0.571% Passes toolstash-check -all. 
Change-Id: Ic856508b27ec5b7fb9b6ca63e955a7139ae7dc30 Reviewed-on: https://go-review.googlesource.com/c/go/+/221780 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/block.go | 28 + src/cmd/compile/internal/ssa/gen/rulegen.go | 16 +- src/cmd/compile/internal/ssa/rewrite386.go | 123 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 189 ++--- src/cmd/compile/internal/ssa/rewriteARM.go | 705 ++++++------------ src/cmd/compile/internal/ssa/rewriteARM64.go | 429 ++++------- src/cmd/compile/internal/ssa/rewriteMIPS.go | 75 +- src/cmd/compile/internal/ssa/rewriteMIPS64.go | 63 +- src/cmd/compile/internal/ssa/rewritePPC64.go | 183 ++--- .../compile/internal/ssa/rewriteRISCV64.go | 3 +- src/cmd/compile/internal/ssa/rewriteS390X.go | 133 ++-- .../compile/internal/ssa/rewritegeneric.go | 3 +- 12 files changed, 675 insertions(+), 1275 deletions(-) diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index eadd5700ba..205fcfc707 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -232,6 +232,34 @@ func (b *Block) Reset(kind BlockKind) { b.AuxInt = 0 } +// resetWithControl resets b and adds control v. +// It is equivalent to b.Reset(kind); b.AddControl(v), +// except that it is one call instead of two and avoids a bounds check. +// It is intended for use by rewrite rules, where this matters. +func (b *Block) resetWithControl(kind BlockKind, v *Value) { + b.Kind = kind + b.ResetControls() + b.Aux = nil + b.AuxInt = 0 + b.Controls[0] = v + v.Uses++ +} + +// resetWithControl2 resets b and adds controls v and w. +// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w), +// except that it is one call instead of three and avoids two bounds checks. +// It is intended for use by rewrite rules, where this matters. 
+func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) { + b.Kind = kind + b.ResetControls() + b.Aux = nil + b.AuxInt = 0 + b.Controls[0] = v + b.Controls[1] = w + v.Uses++ + w.Uses++ +} + // AddEdgeTo adds an edge from block b to block c. Used during building of the // SSA graph; do not use on an already-completed SSA graph. func (b *Block) AddEdgeTo(c *Block) { diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 53c6bdbf65..759336fb2b 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -891,7 +891,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { } blockName, _ := getBlockInfo(outop, arch) - rr.add(stmtf("b.Reset(%s)", blockName)) + var genControls [2]string for i, control := range t[:outdata.controls] { // Select a source position for any new control values. // TODO: does it always make sense to use the source position @@ -904,9 +904,19 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { } // Generate a new control value (or copy an existing value). 
- v := genResult0(rr, arch, control, false, false, newpos) - rr.add(stmtf("b.AddControl(%s)", v)) + genControls[i] = genResult0(rr, arch, control, false, false, newpos) } + switch outdata.controls { + case 0: + rr.add(stmtf("b.Reset(%s)", blockName)) + case 1: + rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0])) + case 2: + rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1])) + default: + log.Fatalf("too many controls: %d", outdata.controls) + } + if auxint != "" { rr.add(stmtf("b.AuxInt = %s", auxint)) } diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index bff76e9029..b83c65da86 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -15967,8 +15967,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386EQ) - b.AddControl(cmp) + b.resetWithControl(Block386EQ, cmp) return true } // match: (EQ (FlagEQ) yes no) @@ -16011,8 +16010,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LE) - b.AddControl(cmp) + b.resetWithControl(Block386LE, cmp) return true } // match: (GE (FlagEQ) yes no) @@ -16053,8 +16051,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LT) - b.AddControl(cmp) + b.resetWithControl(Block386LT, cmp) return true } // match: (GT (FlagEQ) yes no) @@ -16096,8 +16093,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETL { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LT) - b.AddControl(cmp) + b.resetWithControl(Block386LT, cmp) return true } // match: (If (SETLE cmp) yes no) @@ -16105,8 +16101,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETLE { v_0 := 
b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386LE) - b.AddControl(cmp) + b.resetWithControl(Block386LE, cmp) return true } // match: (If (SETG cmp) yes no) @@ -16114,8 +16109,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETG { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GT) - b.AddControl(cmp) + b.resetWithControl(Block386GT, cmp) return true } // match: (If (SETGE cmp) yes no) @@ -16123,8 +16117,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETGE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GE) - b.AddControl(cmp) + b.resetWithControl(Block386GE, cmp) return true } // match: (If (SETEQ cmp) yes no) @@ -16132,8 +16125,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETEQ { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386EQ) - b.AddControl(cmp) + b.resetWithControl(Block386EQ, cmp) return true } // match: (If (SETNE cmp) yes no) @@ -16141,8 +16133,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETNE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386NE) - b.AddControl(cmp) + b.resetWithControl(Block386NE, cmp) return true } // match: (If (SETB cmp) yes no) @@ -16150,8 +16141,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETB { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULT) - b.AddControl(cmp) + b.resetWithControl(Block386ULT, cmp) return true } // match: (If (SETBE cmp) yes no) @@ -16159,8 +16149,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETBE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULE) - b.AddControl(cmp) + b.resetWithControl(Block386ULE, cmp) return true } // match: (If (SETA cmp) yes no) @@ -16168,8 +16157,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETA { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // 
match: (If (SETAE cmp) yes no) @@ -16177,8 +16165,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETAE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (If (SETO cmp) yes no) @@ -16186,8 +16173,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETO { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386OS) - b.AddControl(cmp) + b.resetWithControl(Block386OS, cmp) return true } // match: (If (SETGF cmp) yes no) @@ -16195,8 +16181,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETGF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (If (SETGEF cmp) yes no) @@ -16204,8 +16189,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETGEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (If (SETEQF cmp) yes no) @@ -16213,8 +16197,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETEQF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386EQF) - b.AddControl(cmp) + b.resetWithControl(Block386EQF, cmp) return true } // match: (If (SETNEF cmp) yes no) @@ -16222,18 +16205,16 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386SETNEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386NEF) - b.AddControl(cmp) + b.resetWithControl(Block386NEF, cmp) return true } // match: (If cond yes no) // result: (NE (TESTB cond cond) yes no) for { cond := b.Controls[0] - b.Reset(Block386NE) v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags) v0.AddArg2(cond, cond) - b.AddControl(v0) + b.resetWithControl(Block386NE, v0) return true } case Block386LE: @@ -16242,8 +16223,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == 
Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GE) - b.AddControl(cmp) + b.resetWithControl(Block386GE, cmp) return true } // match: (LE (FlagEQ) yes no) @@ -16284,8 +16264,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386GT) - b.AddControl(cmp) + b.resetWithControl(Block386GT, cmp) return true } // match: (LT (FlagEQ) yes no) @@ -16336,8 +16315,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] { break } - b.Reset(Block386LT) - b.AddControl(cmp) + b.resetWithControl(Block386LT, cmp) return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -16354,8 +16332,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386LE) - b.AddControl(cmp) + b.resetWithControl(Block386LE, cmp) return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -16372,8 +16349,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] { break } - b.Reset(Block386GT) - b.AddControl(cmp) + b.resetWithControl(Block386GT, cmp) return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -16390,8 +16366,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386GE) - b.AddControl(cmp) + b.resetWithControl(Block386GE, cmp) return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -16408,8 +16383,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] { break } - b.Reset(Block386EQ) - b.AddControl(cmp) + b.resetWithControl(Block386EQ, cmp) return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -16426,8 +16400,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386NE) - b.AddControl(cmp) + b.resetWithControl(Block386NE, 
cmp) return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -16444,8 +16417,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] { break } - b.Reset(Block386ULT) - b.AddControl(cmp) + b.resetWithControl(Block386ULT, cmp) return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -16462,8 +16434,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386ULE) - b.AddControl(cmp) + b.resetWithControl(Block386ULE, cmp) return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -16480,8 +16451,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -16498,8 +16468,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) @@ -16516,8 +16485,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] { break } - b.Reset(Block386OS) - b.AddControl(cmp) + b.resetWithControl(Block386OS, cmp) return true } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) @@ -16534,8 +16502,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) @@ -16552,8 +16519,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -16570,8 +16536,7 @@ func 
rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386EQF) - b.AddControl(cmp) + b.resetWithControl(Block386EQF, cmp) return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -16588,8 +16553,7 @@ func rewriteBlock386(b *Block) bool { if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] { break } - b.Reset(Block386NEF) - b.AddControl(cmp) + b.resetWithControl(Block386NEF, cmp) return true } // match: (NE (InvertFlags cmp) yes no) @@ -16597,8 +16561,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386NE) - b.AddControl(cmp) + b.resetWithControl(Block386NE, cmp) return true } // match: (NE (FlagEQ) yes no) @@ -16638,8 +16601,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULE) - b.AddControl(cmp) + b.resetWithControl(Block386ULE, cmp) return true } // match: (UGE (FlagEQ) yes no) @@ -16680,8 +16642,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386ULT) - b.AddControl(cmp) + b.resetWithControl(Block386ULT, cmp) return true } // match: (UGT (FlagEQ) yes no) @@ -16723,8 +16684,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGE) - b.AddControl(cmp) + b.resetWithControl(Block386UGE, cmp) return true } // match: (ULE (FlagEQ) yes no) @@ -16765,8 +16725,7 @@ func rewriteBlock386(b *Block) bool { for b.Controls[0].Op == Op386InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(Block386UGT) - b.AddControl(cmp) + b.resetWithControl(Block386UGT, cmp) return true } // match: (ULT (FlagEQ) yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 
a30c609a68..a0d422b372 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -38434,10 +38434,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38459,10 +38458,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38477,11 +38475,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint32PowerOfTwo(c)) { break } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } // match: (EQ (TESTQconst [c] x)) @@ -38494,11 +38491,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { break } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } // match: (EQ (TESTQ (MOVQconst [c]) x)) @@ -38518,11 +38514,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38549,11 +38544,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38580,11 +38574,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 
== z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38611,11 +38604,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38642,11 +38634,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38669,11 +38660,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38696,11 +38686,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64UGE, v0) return true } break @@ -38710,8 +38699,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQ, cmp) return true } // match: (EQ (FlagEQ) yes no) @@ -38754,8 +38742,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LE, cmp) return true } // match: (GE (FlagEQ) yes no) @@ -38796,8 +38783,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op 
== OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LT, cmp) return true } // match: (GT (FlagEQ) yes no) @@ -38839,8 +38825,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETL { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LT, cmp) return true } // match: (If (SETLE cmp) yes no) @@ -38848,8 +38833,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETLE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64LE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LE, cmp) return true } // match: (If (SETG cmp) yes no) @@ -38857,8 +38841,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETG { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GT, cmp) return true } // match: (If (SETGE cmp) yes no) @@ -38866,8 +38849,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETGE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GE, cmp) return true } // match: (If (SETEQ cmp) yes no) @@ -38875,8 +38857,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETEQ { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQ, cmp) return true } // match: (If (SETNE cmp) yes no) @@ -38884,8 +38865,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETNE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64NE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NE, cmp) return true } // match: (If (SETB cmp) yes no) @@ -38893,8 +38873,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETB { v_0 := b.Controls[0] cmp := v_0.Args[0] - 
b.Reset(BlockAMD64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULT, cmp) return true } // match: (If (SETBE cmp) yes no) @@ -38902,8 +38881,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETBE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULE, cmp) return true } // match: (If (SETA cmp) yes no) @@ -38911,8 +38889,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETA { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (If (SETAE cmp) yes no) @@ -38920,8 +38897,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETAE { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (If (SETO cmp) yes no) @@ -38929,8 +38905,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETO { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64OS) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64OS, cmp) return true } // match: (If (SETGF cmp) yes no) @@ -38938,8 +38913,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETGF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (If (SETGEF cmp) yes no) @@ -38947,8 +38921,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETGEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (If (SETEQF cmp) yes no) @@ -38956,8 +38929,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETEQF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64EQF) - b.AddControl(cmp) + 
b.resetWithControl(BlockAMD64EQF, cmp) return true } // match: (If (SETNEF cmp) yes no) @@ -38965,18 +38937,16 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64SETNEF { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64NEF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NEF, cmp) return true } // match: (If cond yes no) // result: (NE (TESTB cond cond) yes no) for { cond := b.Controls[0] - b.Reset(BlockAMD64NE) v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags) v0.AddArg2(cond, cond) - b.AddControl(v0) + b.resetWithControl(BlockAMD64NE, v0) return true } case BlockAMD64LE: @@ -38985,8 +38955,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GE, cmp) return true } // match: (LE (FlagEQ) yes no) @@ -39027,8 +38996,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64GT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GT, cmp) return true } // match: (LT (FlagEQ) yes no) @@ -39079,8 +39047,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64LT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LT, cmp) return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -39097,8 +39064,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64LE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64LE, cmp) return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -39115,8 +39081,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64GT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GT, cmp) return true } // match: (NE (TESTB (SETGE cmp) (SETGE 
cmp)) yes no) @@ -39133,8 +39098,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64GE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64GE, cmp) return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -39151,8 +39115,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQ, cmp) return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -39169,8 +39132,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64NE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NE, cmp) return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -39187,8 +39149,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULT, cmp) return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -39205,8 +39166,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULE, cmp) return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -39223,8 +39183,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -39241,8 +39200,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) @@ -39259,8 +39217,7 @@ func 
rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64OS) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64OS, cmp) return true } // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) @@ -39280,10 +39237,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39305,10 +39261,9 @@ func rewriteBlockAMD64(b *Block) bool { continue } y := v_0_1 - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39323,11 +39278,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint32PowerOfTwo(c)) { break } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = log2uint32(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } // match: (NE (TESTQconst [c] x)) @@ -39340,11 +39294,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { break } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } // match: (NE (TESTQ (MOVQconst [c]) x)) @@ -39364,11 +39317,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(isUint64PowerOfTwo(c)) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = log2(c) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39395,11 +39347,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + 
b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39426,11 +39377,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39457,11 +39407,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39488,11 +39437,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39515,11 +39463,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) v0.AuxInt = 63 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39542,11 +39489,10 @@ func rewriteBlockAMD64(b *Block) bool { if !(z1 == z2) { continue } - b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) v0.AuxInt = 31 v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockAMD64ULT, v0) return true } break @@ -39565,8 +39511,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) @@ -39583,8 +39528,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } 
// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -39601,8 +39545,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64EQF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64EQF, cmp) return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -39619,8 +39562,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] { break } - b.Reset(BlockAMD64NEF) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NEF, cmp) return true } // match: (NE (InvertFlags cmp) yes no) @@ -39628,8 +39570,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64NE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64NE, cmp) return true } // match: (NE (FlagEQ) yes no) @@ -39713,8 +39654,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULE, cmp) return true } // match: (UGE (FlagEQ) yes no) @@ -39755,8 +39695,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64ULT, cmp) return true } // match: (UGT (FlagEQ) yes no) @@ -39798,8 +39737,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGE, cmp) return true } // match: (ULE (FlagEQ) yes no) @@ -39888,8 +39826,7 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockAMD64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockAMD64UGT, cmp) return true } // match: (ULT (FlagEQ) 
yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index bf1cf2d183..52ab522434 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -16828,8 +16828,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMEQ) - b.AddControl(cmp) + b.resetWithControl(BlockARMEQ, cmp) return true } // match: (EQ (CMPconst [0] l:(SUB x y)) yes no) @@ -16849,10 +16848,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no) @@ -16873,12 +16871,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -16898,11 +16895,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -16923,11 +16919,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -16948,11 +16943,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := 
b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -16973,11 +16967,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -16998,10 +16991,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -17022,10 +17014,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -17046,10 +17037,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADD x y)) yes no) @@ -17073,10 +17063,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } break @@ -17099,12 +17088,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, 
x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -17124,11 +17112,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -17149,11 +17136,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -17174,11 +17160,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -17199,11 +17184,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -17224,10 +17208,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -17248,10 +17231,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, 
types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -17272,10 +17254,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(AND x y)) yes no) @@ -17299,10 +17280,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } break @@ -17324,11 +17304,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -17349,11 +17328,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -17374,11 +17352,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -17399,11 +17376,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + 
b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -17424,10 +17400,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -17448,10 +17423,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -17472,10 +17446,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XOR x y)) yes no) @@ -17499,10 +17472,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } break @@ -17524,11 +17496,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -17549,11 +17520,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRL x y 
[c])) yes no) @@ -17574,11 +17544,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -17599,11 +17568,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -17624,10 +17592,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -17648,10 +17615,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } // match: (EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -17672,10 +17638,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMEQ) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMEQ, v0) return true } case BlockARMGE: @@ -17716,8 +17681,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMLE) - b.AddControl(cmp) + b.resetWithControl(BlockARMLE, cmp) return true } // match: (GE (CMPconst [0] l:(SUB x y)) yes no) @@ -17737,10 +17701,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { 
break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(MULS x y a)) yes no) @@ -17761,12 +17724,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -17786,11 +17748,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -17811,11 +17772,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -17836,11 +17796,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -17861,11 +17820,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -17886,10 +17844,9 @@ func rewriteBlockARM(b *Block) bool { 
if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -17910,10 +17867,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -17934,10 +17890,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADD x y)) yes no) @@ -17961,10 +17916,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } break @@ -17987,12 +17941,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -18012,11 +17965,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -18037,11 +17989,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := 
b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -18062,11 +18013,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -18087,11 +18037,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -18112,10 +18061,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -18136,10 +18084,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -18160,10 +18107,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(AND x y)) yes no) @@ -18187,10 +18133,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGE) v0 := 
b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } break @@ -18212,11 +18157,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -18237,11 +18181,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -18262,11 +18205,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -18287,11 +18229,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -18312,10 +18253,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -18336,10 +18276,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg3(x, 
y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -18360,10 +18299,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XOR x y)) yes no) @@ -18387,10 +18325,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } break @@ -18412,11 +18349,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -18437,11 +18373,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -18462,11 +18397,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -18487,11 +18421,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // 
match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -18512,10 +18445,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -18536,10 +18468,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -18560,10 +18491,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGE, v0) return true } case BlockARMGT: @@ -18605,8 +18535,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMLT) - b.AddControl(cmp) + b.resetWithControl(BlockARMLT, cmp) return true } // match: (GT (CMPconst [0] l:(SUB x y)) yes no) @@ -18626,10 +18555,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(MULS x y a)) yes no) @@ -18650,12 +18578,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -18675,11 +18602,10 @@ func 
rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -18700,11 +18626,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -18725,11 +18650,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -18750,11 +18674,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -18775,10 +18698,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -18799,10 +18721,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -18823,10 +18744,9 @@ func 
rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADD x y)) yes no) @@ -18850,10 +18770,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } break @@ -18875,11 +18794,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -18900,11 +18818,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -18925,11 +18842,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -18950,11 +18866,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -18975,10 +18890,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - 
b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -18999,10 +18913,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -19023,10 +18936,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(AND x y)) yes no) @@ -19050,10 +18962,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } break @@ -19076,12 +18987,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no) @@ -19101,11 +19011,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -19126,11 +19035,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, 
OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -19151,11 +19059,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -19176,11 +19083,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -19201,10 +19107,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -19225,10 +19130,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -19249,10 +19153,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XOR x y)) yes no) @@ -19276,10 +19179,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, 
types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } break @@ -19301,11 +19203,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -19326,11 +19227,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -19351,11 +19251,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -19376,11 +19275,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -19401,10 +19299,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -19425,10 +19322,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + 
b.resetWithControl(BlockARMGT, v0) return true } // match: (GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -19449,10 +19345,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMGT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMGT, v0) return true } case BlockIf: @@ -19461,8 +19356,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMEQ) - b.AddControl(cc) + b.resetWithControl(BlockARMEQ, cc) return true } // match: (If (NotEqual cc) yes no) @@ -19470,8 +19364,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMNotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMNE) - b.AddControl(cc) + b.resetWithControl(BlockARMNE, cc) return true } // match: (If (LessThan cc) yes no) @@ -19479,8 +19372,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMLT) - b.AddControl(cc) + b.resetWithControl(BlockARMLT, cc) return true } // match: (If (LessThanU cc) yes no) @@ -19488,8 +19380,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMULT) - b.AddControl(cc) + b.resetWithControl(BlockARMULT, cc) return true } // match: (If (LessEqual cc) yes no) @@ -19497,8 +19388,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMLE) - b.AddControl(cc) + b.resetWithControl(BlockARMLE, cc) return true } // match: (If (LessEqualU cc) yes no) @@ -19506,8 +19396,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMLessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMULE) - b.AddControl(cc) + b.resetWithControl(BlockARMULE, cc) return true } // match: (If 
(GreaterThan cc) yes no) @@ -19515,8 +19404,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMGT) - b.AddControl(cc) + b.resetWithControl(BlockARMGT, cc) return true } // match: (If (GreaterThanU cc) yes no) @@ -19524,8 +19412,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMUGT) - b.AddControl(cc) + b.resetWithControl(BlockARMUGT, cc) return true } // match: (If (GreaterEqual cc) yes no) @@ -19533,8 +19420,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMGE) - b.AddControl(cc) + b.resetWithControl(BlockARMGE, cc) return true } // match: (If (GreaterEqualU cc) yes no) @@ -19542,19 +19428,17 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMGreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARMUGE) - b.AddControl(cc) + b.resetWithControl(BlockARMUGE, cc) return true } // match: (If cond yes no) // result: (NE (CMPconst [0] cond) yes no) for { cond := b.Controls[0] - b.Reset(BlockARMNE) v0 := b.NewValue0(cond.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(cond) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } case BlockARMLE: @@ -19595,8 +19479,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMGE) - b.AddControl(cmp) + b.resetWithControl(BlockARMGE, cmp) return true } // match: (LE (CMPconst [0] l:(SUB x y)) yes no) @@ -19616,10 +19499,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(MULS x y a)) yes no) @@ 
-19640,12 +19522,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -19665,11 +19546,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -19690,11 +19570,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -19715,11 +19594,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -19740,11 +19618,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -19765,10 +19642,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE 
(CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -19789,10 +19665,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -19813,10 +19688,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADD x y)) yes no) @@ -19840,10 +19714,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } break @@ -19866,12 +19739,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -19891,11 +19763,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -19916,11 +19787,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ 
-19941,11 +19811,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -19966,11 +19835,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -19991,10 +19859,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -20015,10 +19882,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -20039,10 +19905,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(AND x y)) yes no) @@ -20066,10 +19931,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } break @@ -20091,11 +19955,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - 
b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -20116,11 +19979,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -20141,11 +20003,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -20166,11 +20027,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -20191,10 +20051,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -20215,10 +20074,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -20239,10 +20097,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - 
b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XOR x y)) yes no) @@ -20266,10 +20123,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } break @@ -20291,11 +20147,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -20316,11 +20171,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -20341,11 +20195,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -20366,11 +20219,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -20391,10 +20243,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, 
types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -20415,10 +20266,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -20439,10 +20289,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLE, v0) return true } case BlockARMLT: @@ -20484,8 +20333,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMGT) - b.AddControl(cmp) + b.resetWithControl(BlockARMGT, cmp) return true } // match: (LT (CMPconst [0] l:(SUB x y)) yes no) @@ -20505,10 +20353,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(MULS x y a)) yes no) @@ -20529,12 +20376,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -20554,11 +20400,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, 
v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -20579,11 +20424,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -20604,11 +20448,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -20629,11 +20472,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -20654,10 +20496,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -20678,10 +20519,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -20702,10 +20542,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) 
return true } // match: (LT (CMPconst [0] l:(ADD x y)) yes no) @@ -20729,10 +20568,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } break @@ -20755,12 +20593,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -20780,11 +20617,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -20805,11 +20641,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -20830,11 +20665,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -20855,11 +20689,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT 
(CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -20880,10 +20713,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -20904,10 +20736,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -20928,10 +20759,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(AND x y)) yes no) @@ -20955,10 +20785,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } break @@ -20980,11 +20809,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -21005,11 +20833,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -21030,11 +20857,10 @@ func rewriteBlockARM(b 
*Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) @@ -21055,11 +20881,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -21080,10 +20905,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -21104,10 +20928,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -21128,10 +20951,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XOR x y)) yes no) @@ -21155,10 +20977,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } break @@ -21180,11 +21001,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, 
OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -21205,11 +21025,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -21230,11 +21049,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -21255,11 +21073,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -21280,10 +21097,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -21304,10 +21120,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -21328,10 +21143,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMLT) v0 := b.NewValue0(v_0.Pos, 
OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMLT, v0) return true } case BlockARMNE: @@ -21347,8 +21161,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMEQ) - b.AddControl(cc) + b.resetWithControl(BlockARMEQ, cc) return true } // match: (NE (CMPconst [0] (NotEqual cc)) yes no) @@ -21363,8 +21176,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMNE) - b.AddControl(cc) + b.resetWithControl(BlockARMNE, cc) return true } // match: (NE (CMPconst [0] (LessThan cc)) yes no) @@ -21379,8 +21191,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMLT) - b.AddControl(cc) + b.resetWithControl(BlockARMLT, cc) return true } // match: (NE (CMPconst [0] (LessThanU cc)) yes no) @@ -21395,8 +21206,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMULT) - b.AddControl(cc) + b.resetWithControl(BlockARMULT, cc) return true } // match: (NE (CMPconst [0] (LessEqual cc)) yes no) @@ -21411,8 +21221,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMLE) - b.AddControl(cc) + b.resetWithControl(BlockARMLE, cc) return true } // match: (NE (CMPconst [0] (LessEqualU cc)) yes no) @@ -21427,8 +21236,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMULE) - b.AddControl(cc) + b.resetWithControl(BlockARMULE, cc) return true } // match: (NE (CMPconst [0] (GreaterThan cc)) yes no) @@ -21443,8 +21251,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMGT) - b.AddControl(cc) + b.resetWithControl(BlockARMGT, cc) return true } // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no) @@ -21459,8 +21266,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMUGT) - b.AddControl(cc) + b.resetWithControl(BlockARMUGT, cc) return true } // match: (NE (CMPconst 
[0] (GreaterEqual cc)) yes no) @@ -21475,8 +21281,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMGE) - b.AddControl(cc) + b.resetWithControl(BlockARMGE, cc) return true } // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no) @@ -21491,8 +21296,7 @@ func rewriteBlockARM(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockARMUGE) - b.AddControl(cc) + b.resetWithControl(BlockARMUGE, cc) return true } // match: (NE (FlagEQ) yes no) @@ -21531,8 +21335,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMNE) - b.AddControl(cmp) + b.resetWithControl(BlockARMNE, cmp) return true } // match: (NE (CMPconst [0] l:(SUB x y)) yes no) @@ -21552,10 +21355,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(MULS x y a)) yes no) @@ -21576,12 +21378,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no) @@ -21601,11 +21402,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) @@ -21626,11 +21426,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - 
b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) @@ -21651,11 +21450,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) @@ -21676,11 +21474,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) @@ -21701,10 +21498,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) @@ -21725,10 +21521,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) @@ -21749,10 +21544,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADD x y)) yes no) @@ -21776,10 +21570,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + 
b.resetWithControl(BlockARMNE, v0) return true } break @@ -21802,12 +21595,11 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no) @@ -21827,11 +21619,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) @@ -21852,11 +21643,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) @@ -21877,11 +21667,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) @@ -21902,11 +21691,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) @@ -21927,10 +21715,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + 
b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) @@ -21951,10 +21738,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) @@ -21975,10 +21761,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(AND x y)) yes no) @@ -22002,10 +21787,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } break @@ -22027,11 +21811,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) @@ -22052,11 +21835,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) @@ -22077,11 +21859,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] 
l:(ANDshiftRA x y [c])) yes no) @@ -22102,11 +21883,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) @@ -22127,10 +21907,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) @@ -22151,10 +21930,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) @@ -22175,10 +21953,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XOR x y)) yes no) @@ -22202,10 +21979,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { continue } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } break @@ -22227,11 +22003,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) @@ -22252,11 +22027,10 @@ func rewriteBlockARM(b *Block) bool { 
if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) @@ -22277,11 +22051,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) @@ -22302,11 +22075,10 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) @@ -22327,10 +22099,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) @@ -22351,10 +22122,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) @@ -22375,10 +22145,9 @@ func rewriteBlockARM(b *Block) bool { if !(l.Uses == 1) { break } - b.Reset(BlockARMNE) v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg3(x, y, z) - b.AddControl(v0) + b.resetWithControl(BlockARMNE, v0) return true } case BlockARMUGE: @@ -22419,8 +22188,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := 
b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMULE) - b.AddControl(cmp) + b.resetWithControl(BlockARMULE, cmp) return true } case BlockARMUGT: @@ -22462,8 +22230,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMULT) - b.AddControl(cmp) + b.resetWithControl(BlockARMULT, cmp) return true } case BlockARMULE: @@ -22504,8 +22271,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMUGE) - b.AddControl(cmp) + b.resetWithControl(BlockARMUGE, cmp) return true } case BlockARMULT: @@ -22547,8 +22313,7 @@ func rewriteBlockARM(b *Block) bool { for b.Controls[0].Op == OpARMInvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARMUGT) - b.AddControl(cmp) + b.resetWithControl(BlockARMUGT, cmp) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index a6b13497ac..99beedcea1 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -25854,11 +25854,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(AND x y)) yes no) @@ -25882,10 +25881,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -25911,10 +25909,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, 
v0) return true } break @@ -25936,11 +25933,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -25960,11 +25956,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -25984,11 +25979,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(ADD x y)) yes no) @@ -26012,10 +26006,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -26041,10 +26034,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } break @@ -26064,10 +26056,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPW x z:(NEG y)) yes no) @@ -26085,10 +26076,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := 
b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] x) yes no) @@ -26099,8 +26089,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64Z) - b.AddControl(x) + b.resetWithControl(BlockARM64Z, x) return true } // match: (EQ (CMPWconst [0] x) yes no) @@ -26111,8 +26100,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64ZW) - b.AddControl(x) + b.resetWithControl(BlockARM64ZW, x) return true } // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no) @@ -26133,12 +26121,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no) @@ -26159,12 +26146,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -26185,12 +26171,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -26211,12 +26196,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64EQ) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, 
x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (TSTconst [c] x) yes no) @@ -26229,8 +26213,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(c) return true } @@ -26244,8 +26227,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(int64(uint32(c))) return true } @@ -26288,8 +26270,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockARM64EQ, cmp) return true } case BlockARM64FGE: @@ -26298,8 +26279,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FLE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FLE, cmp) return true } case BlockARM64FGT: @@ -26308,8 +26288,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FLT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FLT, cmp) return true } case BlockARM64FLE: @@ -26318,8 +26297,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FGE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FGE, cmp) return true } case BlockARM64FLT: @@ -26328,8 +26306,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64FGT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64FGT, cmp) return true } case BlockARM64GE: @@ -26350,11 +26327,10 @@ func rewriteBlockARM64(b *Block) bool { if 
!(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(AND x y)) yes no) @@ -26378,10 +26354,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -26407,10 +26382,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -26432,11 +26406,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -26456,11 +26429,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -26480,11 +26452,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(ADD x y)) yes no) @@ -26508,10 +26479,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - 
b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -26537,10 +26507,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } break @@ -26560,10 +26529,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPW x z:(NEG y)) yes no) @@ -26581,10 +26549,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(MADD a x y)) yes no) @@ -26605,12 +26572,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no) @@ -26631,12 +26597,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -26657,12 +26622,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - 
b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -26683,12 +26647,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] x) yes no) @@ -26699,8 +26662,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = int64(31) return true } @@ -26712,8 +26674,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = int64(63) return true } @@ -26754,8 +26715,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64LE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64LE, cmp) return true } case BlockARM64GT: @@ -26776,11 +26736,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(AND x y)) yes no) @@ -26804,10 +26763,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -26833,10 +26791,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + 
b.resetWithControl(BlockARM64GT, v0) return true } break @@ -26858,11 +26815,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -26882,11 +26838,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -26906,11 +26861,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(ADD x y)) yes no) @@ -26934,10 +26888,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -26963,10 +26916,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } break @@ -26986,10 +26938,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPW x z:(NEG y)) yes no) @@ -27007,10 +26958,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - 
b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(MADD a x y)) yes no) @@ -27031,12 +26981,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no) @@ -27057,12 +27006,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -27083,12 +27031,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -27109,12 +27056,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64GT) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (FlagEQ) yes no) @@ -27155,8 +27101,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64LT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64LT, cmp) return true } case BlockIf: @@ -27165,8 
+27110,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64EQ) - b.AddControl(cc) + b.resetWithControl(BlockARM64EQ, cc) return true } // match: (If (NotEqual cc) yes no) @@ -27174,8 +27118,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64NE) - b.AddControl(cc) + b.resetWithControl(BlockARM64NE, cc) return true } // match: (If (LessThan cc) yes no) @@ -27183,8 +27126,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LT) - b.AddControl(cc) + b.resetWithControl(BlockARM64LT, cc) return true } // match: (If (LessThanU cc) yes no) @@ -27192,8 +27134,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULT) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULT, cc) return true } // match: (If (LessEqual cc) yes no) @@ -27201,8 +27142,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LE) - b.AddControl(cc) + b.resetWithControl(BlockARM64LE, cc) return true } // match: (If (LessEqualU cc) yes no) @@ -27210,8 +27150,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULE) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULE, cc) return true } // match: (If (GreaterThan cc) yes no) @@ -27219,8 +27158,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GT) - b.AddControl(cc) + b.resetWithControl(BlockARM64GT, cc) return true } // match: (If (GreaterThanU cc) yes no) @@ -27228,8 +27166,7 @@ func rewriteBlockARM64(b 
*Block) bool { for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGT, cc) return true } // match: (If (GreaterEqual cc) yes no) @@ -27237,8 +27174,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GE) - b.AddControl(cc) + b.resetWithControl(BlockARM64GE, cc) return true } // match: (If (GreaterEqualU cc) yes no) @@ -27246,8 +27182,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGE, cc) return true } // match: (If (LessThanF cc) yes no) @@ -27255,8 +27190,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLT, cc) return true } // match: (If (LessEqualF cc) yes no) @@ -27264,8 +27198,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLE, cc) return true } // match: (If (GreaterThanF cc) yes no) @@ -27273,8 +27206,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGT, cc) return true } // match: (If (GreaterEqualF cc) yes no) @@ -27282,16 +27214,14 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGE, cc) return true } // match: (If cond yes no) // result: (NZ cond yes no) for { cond := b.Controls[0] - 
b.Reset(BlockARM64NZ) - b.AddControl(cond) + b.resetWithControl(BlockARM64NZ, cond) return true } case BlockARM64LE: @@ -27312,11 +27242,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(AND x y)) yes no) @@ -27340,10 +27269,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -27369,10 +27297,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -27394,11 +27321,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -27418,11 +27344,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -27442,11 +27367,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(ADD x y)) yes no) @@ -27470,10 
+27394,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -27499,10 +27422,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } break @@ -27522,10 +27444,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPW x z:(NEG y)) yes no) @@ -27543,10 +27464,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(MADD a x y)) yes no) @@ -27567,12 +27487,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no) @@ -27593,12 +27512,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -27619,12 +27537,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { 
break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -27645,12 +27562,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (FlagEQ) yes no) @@ -27690,8 +27606,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64GE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64GE, cmp) return true } case BlockARM64LT: @@ -27712,11 +27627,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(AND x y)) yes no) @@ -27740,10 +27654,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -27769,10 +27682,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -27794,11 +27706,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c 
v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -27818,11 +27729,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -27842,11 +27752,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(ADD x y)) yes no) @@ -27870,10 +27779,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -27899,10 +27807,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } break @@ -27922,10 +27829,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPW x z:(NEG y)) yes no) @@ -27943,10 +27849,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(MADD a x y)) yes no) @@ -27967,12 +27872,11 @@ func 
rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no) @@ -27993,12 +27897,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -28019,12 +27922,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -28045,12 +27947,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64LT) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] x) yes no) @@ -28061,8 +27962,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = int64(31) return true } @@ -28074,8 +27974,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = int64(63) return true } @@ -28117,8 +28016,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == 
OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64GT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64GT, cmp) return true } case BlockARM64NE: @@ -28139,11 +28037,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] z:(AND x y)) yes no) @@ -28167,10 +28064,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -28196,10 +28092,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -28221,11 +28116,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no) @@ -28245,11 +28139,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) @@ -28269,11 +28162,10 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // 
match: (NE (CMPconst [0] z:(ADD x y)) yes no) @@ -28297,10 +28189,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -28326,10 +28217,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } break @@ -28349,10 +28239,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPW x z:(NEG y)) yes no) @@ -28370,10 +28259,9 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] x) yes no) @@ -28384,8 +28272,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64NZ) - b.AddControl(x) + b.resetWithControl(BlockARM64NZ, x) return true } // match: (NE (CMPWconst [0] x) yes no) @@ -28396,8 +28283,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockARM64NZW) - b.AddControl(x) + b.resetWithControl(BlockARM64NZW, x) return true } // match: (NE (CMPconst [0] z:(MADD a x y)) yes no) @@ -28418,12 +28304,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE 
(CMPconst [0] z:(MSUB a x y)) yes no) @@ -28444,12 +28329,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no) @@ -28470,12 +28354,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no) @@ -28496,12 +28379,11 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Reset(BlockARM64NE) v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) - b.AddControl(v0) + b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (TSTconst [c] x) yes no) @@ -28514,8 +28396,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(c) return true } @@ -28529,8 +28410,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(int64(uint32(c))) return true } @@ -28570,8 +28450,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64NE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64NE, cmp) return true } case BlockARM64NZ: @@ -28580,8 +28459,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] 
cc := v_0.Args[0] - b.Reset(BlockARM64EQ) - b.AddControl(cc) + b.resetWithControl(BlockARM64EQ, cc) return true } // match: (NZ (NotEqual cc) yes no) @@ -28589,8 +28467,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64NE) - b.AddControl(cc) + b.resetWithControl(BlockARM64NE, cc) return true } // match: (NZ (LessThan cc) yes no) @@ -28598,8 +28475,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LT) - b.AddControl(cc) + b.resetWithControl(BlockARM64LT, cc) return true } // match: (NZ (LessThanU cc) yes no) @@ -28607,8 +28483,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULT) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULT, cc) return true } // match: (NZ (LessEqual cc) yes no) @@ -28616,8 +28491,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64LE) - b.AddControl(cc) + b.resetWithControl(BlockARM64LE, cc) return true } // match: (NZ (LessEqualU cc) yes no) @@ -28625,8 +28499,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64ULE) - b.AddControl(cc) + b.resetWithControl(BlockARM64ULE, cc) return true } // match: (NZ (GreaterThan cc) yes no) @@ -28634,8 +28507,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GT) - b.AddControl(cc) + b.resetWithControl(BlockARM64GT, cc) return true } // match: (NZ (GreaterThanU cc) yes no) @@ -28643,8 +28515,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] - 
b.Reset(BlockARM64UGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGT, cc) return true } // match: (NZ (GreaterEqual cc) yes no) @@ -28652,8 +28523,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64GE) - b.AddControl(cc) + b.resetWithControl(BlockARM64GE, cc) return true } // match: (NZ (GreaterEqualU cc) yes no) @@ -28661,8 +28531,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64UGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64UGE, cc) return true } // match: (NZ (LessThanF cc) yes no) @@ -28670,8 +28539,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLT, cc) return true } // match: (NZ (LessEqualF cc) yes no) @@ -28679,8 +28547,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FLE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FLE, cc) return true } // match: (NZ (GreaterThanF cc) yes no) @@ -28688,8 +28555,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGT) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGT, cc) return true } // match: (NZ (GreaterEqualF cc) yes no) @@ -28697,8 +28563,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockARM64FGE) - b.AddControl(cc) + b.resetWithControl(BlockARM64FGE, cc) return true } // match: (NZ (ANDconst [c] x) yes no) @@ -28711,8 +28576,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + 
b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(c) return true } @@ -28750,8 +28614,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBNZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBNZ, x) b.Aux = ntz(int64(uint32(c))) return true } @@ -28818,8 +28681,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64ULE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64ULE, cmp) return true } case BlockARM64UGT: @@ -28861,8 +28723,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64ULT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64ULT, cmp) return true } case BlockARM64ULE: @@ -28903,8 +28764,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64UGE) - b.AddControl(cmp) + b.resetWithControl(BlockARM64UGE, cmp) return true } case BlockARM64ULT: @@ -28946,8 +28806,7 @@ func rewriteBlockARM64(b *Block) bool { for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockARM64UGT) - b.AddControl(cmp) + b.resetWithControl(BlockARM64UGT, cmp) return true } case BlockARM64Z: @@ -28961,8 +28820,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(c)) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(c) return true } @@ -29000,8 +28858,7 @@ func rewriteBlockARM64(b *Block) bool { if !(oneBit(int64(uint32(c)))) { break } - b.Reset(BlockARM64TBZ) - b.AddControl(x) + b.resetWithControl(BlockARM64TBZ, x) b.Aux = ntz(int64(uint32(c))) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index e036885a16..83bb92fc35 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ 
b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -7247,8 +7247,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPF, cmp) return true } // match: (EQ (FPFlagFalse cmp) yes no) @@ -7256,8 +7255,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPT, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) @@ -7272,8 +7270,7 @@ func rewriteBlockMIPS(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -7288,8 +7285,7 @@ func rewriteBlockMIPS(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) @@ -7303,8 +7299,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTconst { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -7318,8 +7313,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUconst { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no) @@ -7333,8 +7327,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTzero { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no) @@ -7348,8 +7341,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUzero { break } - b.Reset(BlockMIPSNE) - b.AddControl(cmp) + 
b.resetWithControl(BlockMIPSNE, cmp) return true } // match: (EQ (SGTUconst [1] x) yes no) @@ -7360,8 +7352,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSNE) - b.AddControl(x) + b.resetWithControl(BlockMIPSNE, x) return true } // match: (EQ (SGTUzero x) yes no) @@ -7369,8 +7360,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTUzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSEQ) - b.AddControl(x) + b.resetWithControl(BlockMIPSEQ, x) return true } // match: (EQ (SGTconst [0] x) yes no) @@ -7381,8 +7371,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSGEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSGEZ, x) return true } // match: (EQ (SGTzero x) yes no) @@ -7390,8 +7379,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSLEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSLEZ, x) return true } // match: (EQ (MOVWconst [0]) yes no) @@ -7474,8 +7462,7 @@ func rewriteBlockMIPS(b *Block) bool { // result: (NE cond yes no) for { cond := b.Controls[0] - b.Reset(BlockMIPSNE) - b.AddControl(cond) + b.resetWithControl(BlockMIPSNE, cond) return true } case BlockMIPSLEZ: @@ -7536,8 +7523,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPT, cmp) return true } // match: (NE (FPFlagFalse cmp) yes no) @@ -7545,8 +7531,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSFPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPSFPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSFPF, cmp) return true } // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) @@ -7561,8 +7546,7 @@ func rewriteBlockMIPS(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + 
b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -7577,8 +7561,7 @@ func rewriteBlockMIPS(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) @@ -7592,8 +7575,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTconst { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -7607,8 +7589,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUconst { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTzero _)) yes no) @@ -7622,8 +7603,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTzero { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no) @@ -7637,8 +7617,7 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTUzero { break } - b.Reset(BlockMIPSEQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPSEQ, cmp) return true } // match: (NE (SGTUconst [1] x) yes no) @@ -7649,8 +7628,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSEQ) - b.AddControl(x) + b.resetWithControl(BlockMIPSEQ, x) return true } // match: (NE (SGTUzero x) yes no) @@ -7658,8 +7636,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTUzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSNE) - b.AddControl(x) + b.resetWithControl(BlockMIPSNE, x) return true } // match: (NE (SGTconst [0] x) yes no) @@ -7670,8 +7647,7 @@ func rewriteBlockMIPS(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPSLTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSLTZ, x) return true } // match: (NE 
(SGTzero x) yes no) @@ -7679,8 +7655,7 @@ func rewriteBlockMIPS(b *Block) bool { for b.Controls[0].Op == OpMIPSSGTzero { v_0 := b.Controls[0] x := v_0.Args[0] - b.Reset(BlockMIPSGTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPSGTZ, x) return true } // match: (NE (MOVWconst [0]) yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 4413c535d6..c8d72363b3 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -7743,8 +7743,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPF, cmp) return true } // match: (EQ (FPFlagFalse cmp) yes no) @@ -7752,8 +7751,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPT, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) @@ -7768,8 +7766,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -7784,8 +7781,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) @@ -7799,8 +7795,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTconst { break } - b.Reset(BlockMIPS64NE) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -7814,8 +7809,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTUconst { break } - b.Reset(BlockMIPS64NE) - 
b.AddControl(cmp) + b.resetWithControl(BlockMIPS64NE, cmp) return true } // match: (EQ (SGTUconst [1] x) yes no) @@ -7826,8 +7820,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64NE) - b.AddControl(x) + b.resetWithControl(BlockMIPS64NE, x) return true } // match: (EQ (SGTU x (MOVVconst [0])) yes no) @@ -7840,8 +7833,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64EQ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64EQ, x) return true } // match: (EQ (SGTconst [0] x) yes no) @@ -7852,8 +7844,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64GEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64GEZ, x) return true } // match: (EQ (SGT x (MOVVconst [0])) yes no) @@ -7866,8 +7857,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64LEZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64LEZ, x) return true } // match: (EQ (MOVVconst [0]) yes no) @@ -7950,8 +7940,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE cond yes no) for { cond := b.Controls[0] - b.Reset(BlockMIPS64NE) - b.AddControl(cond) + b.resetWithControl(BlockMIPS64NE, cond) return true } case BlockMIPS64LEZ: @@ -8012,8 +8001,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagTrue { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPT) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPT, cmp) return true } // match: (NE (FPFlagFalse cmp) yes no) @@ -8021,8 +8009,7 @@ func rewriteBlockMIPS64(b *Block) bool { for b.Controls[0].Op == OpMIPS64FPFlagFalse { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockMIPS64FPF) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64FPF, cmp) return true } // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) @@ -8037,8 +8024,7 @@ func rewriteBlockMIPS64(b *Block) bool { 
break } _ = cmp.Args[1] - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -8053,8 +8039,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } _ = cmp.Args[1] - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) @@ -8068,8 +8053,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTconst { break } - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -8083,8 +8067,7 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTUconst { break } - b.Reset(BlockMIPS64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockMIPS64EQ, cmp) return true } // match: (NE (SGTUconst [1] x) yes no) @@ -8095,8 +8078,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64EQ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64EQ, x) return true } // match: (NE (SGTU x (MOVVconst [0])) yes no) @@ -8109,8 +8091,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64NE) - b.AddControl(x) + b.resetWithControl(BlockMIPS64NE, x) return true } // match: (NE (SGTconst [0] x) yes no) @@ -8121,8 +8102,7 @@ func rewriteBlockMIPS64(b *Block) bool { break } x := v_0.Args[0] - b.Reset(BlockMIPS64LTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64LTZ, x) return true } // match: (NE (SGT x (MOVVconst [0])) yes no) @@ -8135,8 +8115,7 @@ func rewriteBlockMIPS64(b *Block) bool { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { break } - b.Reset(BlockMIPS64GTZ) - b.AddControl(x) + b.resetWithControl(BlockMIPS64GTZ, x) return true } // match: (NE (MOVVconst [0]) yes no) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go 
b/src/cmd/compile/internal/ssa/rewritePPC64.go index 279fed4edb..1bdd8ee23a 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -15159,11 +15159,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -15179,11 +15178,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (FlagEQ) yes no) @@ -15211,8 +15209,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64EQ) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64EQ, cmp) return true } // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) @@ -15228,11 +15225,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -15248,11 +15244,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(AND x y)) yes no) @@ -15276,10 +15271,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, 
OpPPC64ANDCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } break @@ -15305,10 +15299,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } break @@ -15334,10 +15327,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64EQ) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64EQ, v0) return true } break @@ -15367,8 +15359,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64LE) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64LE, cmp) return true } // match: (GE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -15384,11 +15375,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } // match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -15404,11 +15394,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } // match: (GE (CMPconst [0] z:(AND x y)) yes no) @@ -15432,10 +15421,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } break @@ -15461,10 +15449,9 @@ func rewriteBlockPPC64(b *Block) bool { if 
!(z.Uses == 1) { continue } - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } break @@ -15490,10 +15477,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GE, v0) return true } break @@ -15524,8 +15510,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64LT) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64LT, cmp) return true } // match: (GT (CMPconst [0] (ANDconst [c] x)) yes no) @@ -15541,11 +15526,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return true } // match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -15561,11 +15545,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return true } // match: (GT (CMPconst [0] z:(AND x y)) yes no) @@ -15589,10 +15572,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return true } break @@ -15618,10 +15600,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return 
true } break @@ -15647,10 +15628,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64GT) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64GT, v0) return true } break @@ -15661,8 +15641,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64EQ) - b.AddControl(cc) + b.resetWithControl(BlockPPC64EQ, cc) return true } // match: (If (NotEqual cc) yes no) @@ -15670,8 +15649,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64NE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64NE, cc) return true } // match: (If (LessThan cc) yes no) @@ -15679,8 +15657,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64LT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LT, cc) return true } // match: (If (LessEqual cc) yes no) @@ -15688,8 +15665,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64LE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LE, cc) return true } // match: (If (GreaterThan cc) yes no) @@ -15697,8 +15673,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64GT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GT, cc) return true } // match: (If (GreaterEqual cc) yes no) @@ -15706,8 +15681,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64GE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GE, cc) return true } // match: (If (FLessThan cc) yes no) @@ -15715,8 +15689,7 @@ func 
rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FLessThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FLT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLT, cc) return true } // match: (If (FLessEqual cc) yes no) @@ -15724,8 +15697,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FLessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FLE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLE, cc) return true } // match: (If (FGreaterThan cc) yes no) @@ -15733,8 +15705,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FGreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FGT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGT, cc) return true } // match: (If (FGreaterEqual cc) yes no) @@ -15742,19 +15713,17 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64FGreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] - b.Reset(BlockPPC64FGE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGE, cc) return true } // match: (If cond yes no) // result: (NE (CMPWconst [0] cond) yes no) for { cond := b.Controls[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags) v0.AuxInt = 0 v0.AddArg(cond) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } case BlockPPC64LE: @@ -15782,8 +15751,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64GE) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64GE, cmp) return true } // match: (LE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -15799,11 +15767,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } // match: (LE (CMPWconst 
[0] (ANDconst [c] x)) yes no) @@ -15819,11 +15786,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } // match: (LE (CMPconst [0] z:(AND x y)) yes no) @@ -15847,10 +15813,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } break @@ -15876,10 +15841,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } break @@ -15905,10 +15869,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LE, v0) return true } break @@ -15939,8 +15902,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64GT) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64GT, cmp) return true } // match: (LT (CMPconst [0] (ANDconst [c] x)) yes no) @@ -15956,11 +15918,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } // match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -15976,11 +15937,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, 
OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } // match: (LT (CMPconst [0] z:(AND x y)) yes no) @@ -16004,10 +15964,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } break @@ -16033,10 +15992,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } break @@ -16062,10 +16020,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64LT) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64LT, v0) return true } break @@ -16083,8 +16040,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64EQ) - b.AddControl(cc) + b.resetWithControl(BlockPPC64EQ, cc) return true } // match: (NE (CMPWconst [0] (NotEqual cc)) yes no) @@ -16099,8 +16055,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64NE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64NE, cc) return true } // match: (NE (CMPWconst [0] (LessThan cc)) yes no) @@ -16115,8 +16070,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64LT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LT, cc) return true } // match: (NE (CMPWconst [0] (LessEqual cc)) yes no) @@ -16131,8 +16085,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64LE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64LE, cc) return true } // match: (NE (CMPWconst [0] (GreaterThan cc)) yes no) @@ -16147,8 +16100,7 @@ func 
rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64GT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GT, cc) return true } // match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no) @@ -16163,8 +16115,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64GE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64GE, cc) return true } // match: (NE (CMPWconst [0] (FLessThan cc)) yes no) @@ -16179,8 +16130,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FLT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLT, cc) return true } // match: (NE (CMPWconst [0] (FLessEqual cc)) yes no) @@ -16195,8 +16145,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FLE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FLE, cc) return true } // match: (NE (CMPWconst [0] (FGreaterThan cc)) yes no) @@ -16211,8 +16160,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FGT) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGT, cc) return true } // match: (NE (CMPWconst [0] (FGreaterEqual cc)) yes no) @@ -16227,8 +16175,7 @@ func rewriteBlockPPC64(b *Block) bool { break } cc := v_0_0.Args[0] - b.Reset(BlockPPC64FGE) - b.AddControl(cc) + b.resetWithControl(BlockPPC64FGE, cc) return true } // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16244,11 +16191,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16264,11 +16210,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c 
v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (FlagEQ) yes no) @@ -16295,8 +16240,7 @@ func rewriteBlockPPC64(b *Block) bool { for b.Controls[0].Op == OpPPC64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] - b.Reset(BlockPPC64NE) - b.AddControl(cmp) + b.resetWithControl(BlockPPC64NE, cmp) return true } // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -16312,11 +16256,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -16332,11 +16275,10 @@ func rewriteBlockPPC64(b *Block) bool { } c := v_0_0.AuxInt x := v_0_0.Args[0] - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } // match: (NE (CMPconst [0] z:(AND x y)) yes no) @@ -16360,10 +16302,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } break @@ -16389,10 +16330,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } break @@ -16418,10 +16358,9 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - b.Reset(BlockPPC64NE) v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) v0.AddArg2(x, y) - b.AddControl(v0) + b.resetWithControl(BlockPPC64NE, v0) return true } break diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go 
b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index f20f744456..2e0b34de8d 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -4258,8 +4258,7 @@ func rewriteBlockRISCV64(b *Block) bool { // result: (BNE cond yes no) for { cond := b.Controls[0] - b.Reset(BlockRISCV64BNE) - b.AddControl(cond) + b.resetWithControl(BlockRISCV64BNE, cond) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 874bac1fde..2ac8a6f05e 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -19046,9 +19046,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCGRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCGRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -19059,9 +19057,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -19072,9 +19068,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCLGRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCLGRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -19085,9 +19079,7 @@ func rewriteBlockS390X(b *Block) bool { y := v_0.Args[1] x := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XCLRJ) - b.AddControl(x) - b.AddControl(y) + b.resetWithControl2(BlockS390XCLRJ, x, y) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true } @@ -19102,8 +19094,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered 
return true @@ -19119,8 +19110,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -19136,8 +19126,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -19153,8 +19142,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c.(s390x.CCMask) &^ s390x.Unordered return true @@ -19170,8 +19158,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = 127 b.Aux = s390x.LessOrEqual return true @@ -19187,8 +19174,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = 127 b.Aux = s390x.LessOrEqual return true @@ -19204,8 +19190,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.LessOrEqual { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = -128 b.Aux = s390x.Less return true @@ -19221,8 +19206,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.LessOrEqual { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = -128 b.Aux = s390x.Less return true @@ -19238,8 +19222,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Greater { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = -128 b.Aux = s390x.GreaterOrEqual return true @@ -19255,8 +19238,7 @@ func 
rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Greater { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = -128 b.Aux = s390x.GreaterOrEqual return true @@ -19272,8 +19254,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = 127 b.Aux = s390x.Greater return true @@ -19289,8 +19270,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = 127 b.Aux = s390x.Greater return true @@ -19306,8 +19286,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = -1 b.Aux = s390x.LessOrEqual return true @@ -19323,8 +19302,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.Less { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = -1 b.Aux = s390x.LessOrEqual return true @@ -19340,8 +19318,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = -1 b.Aux = s390x.Greater return true @@ -19357,8 +19334,7 @@ func rewriteBlockS390X(b *Block) bool { if b.Aux != s390x.GreaterOrEqual { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = -1 b.Aux = s390x.Greater return true @@ -19369,8 +19345,7 @@ func rewriteBlockS390X(b *Block) bool { v_0 := b.Controls[0] cmp := v_0.Args[0] c := b.Aux - b.Reset(BlockS390XBRC) - b.AddControl(cmp) + b.resetWithControl(BlockS390XBRC, cmp) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -19566,8 +19541,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - 
b.Reset(BlockS390XCGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -19583,8 +19557,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(x)) { break } - b.Reset(BlockS390XCGIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCGIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -19600,11 +19573,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(y) && is32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -19619,11 +19591,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(x) && is32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -19662,8 +19633,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = y b.Aux = c return true @@ -19675,8 +19645,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = y b.Aux = c return true @@ -19887,8 +19856,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLGIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -19904,8 +19872,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(x)) { break } - b.Reset(BlockS390XCLGIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCLGIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -19921,11 
+19888,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(y) && isU32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPUconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -19940,11 +19906,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(x) && isU32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPUconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -19995,8 +19960,7 @@ func rewriteBlockS390X(b *Block) bool { if b.AuxInt != 0 || b.Aux != s390x.LessOrGreater || !(int32(x) != 0) { break } - b.Reset(BlockS390XBRC) - b.AddControl(cmp) + b.resetWithControl(BlockS390XBRC, cmp) b.Aux = d return true } @@ -20007,8 +19971,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = y b.Aux = c return true @@ -20020,8 +19983,7 @@ func rewriteBlockS390X(b *Block) bool { x := v_0.Args[0] y := b.AuxInt c := b.Aux - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = y b.Aux = c return true @@ -20144,8 +20106,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(y)) { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCLIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -20161,8 +20122,7 @@ func rewriteBlockS390X(b *Block) bool { if !(isU8Bit(x)) { break } - b.Reset(BlockS390XCLIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCLIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -20178,11 +20138,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(y) && isU32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := 
b.NewValue0(x.Pos, OpS390XCMPWUconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -20197,11 +20156,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!isU8Bit(x) && isU32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPWUconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -20244,8 +20202,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(y)) { break } - b.Reset(BlockS390XCIJ) - b.AddControl(x) + b.resetWithControl(BlockS390XCIJ, x) b.AuxInt = int64(int8(y)) b.Aux = c return true @@ -20261,8 +20218,7 @@ func rewriteBlockS390X(b *Block) bool { if !(is8Bit(x)) { break } - b.Reset(BlockS390XCIJ) - b.AddControl(y) + b.resetWithControl(BlockS390XCIJ, y) b.AuxInt = int64(int8(x)) b.Aux = c.(s390x.CCMask).ReverseComparison() return true @@ -20278,11 +20234,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(y) && is32Bit(y)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(x.Pos, OpS390XCMPWconst, types.TypeFlags) v0.AuxInt = int64(int32(y)) v0.AddArg(x) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c return true } @@ -20297,11 +20252,10 @@ func rewriteBlockS390X(b *Block) bool { if !(!is8Bit(x) && is32Bit(x)) { break } - b.Reset(BlockS390XBRC) v0 := b.NewValue0(v_0.Pos, OpS390XCMPWconst, types.TypeFlags) v0.AuxInt = int64(int32(x)) v0.AddArg(y) - b.AddControl(v0) + b.resetWithControl(BlockS390XBRC, v0) b.Aux = c.(s390x.CCMask).ReverseComparison() return true } @@ -20337,10 +20291,9 @@ func rewriteBlockS390X(b *Block) bool { // result: (CLIJ {s390x.LessOrGreater} (MOVBZreg cond) [0] yes no) for { cond := b.Controls[0] - b.Reset(BlockS390XCLIJ) v0 := b.NewValue0(cond.Pos, OpS390XMOVBZreg, typ.Bool) v0.AddArg(cond) - b.AddControl(v0) + b.resetWithControl(BlockS390XCLIJ, v0) 
b.AuxInt = 0 b.Aux = s390x.LessOrGreater return true diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 0089df46b9..e0541f8710 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -21806,8 +21806,7 @@ func rewriteBlockgeneric(b *Block) bool { for b.Controls[0].Op == OpNot { v_0 := b.Controls[0] cond := v_0.Args[0] - b.Reset(BlockIf) - b.AddControl(cond) + b.resetWithControl(BlockIf, cond) b.swapSuccessors() return true } From a2bff7c2964c6bf2c9741eb767d749d773f20770 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sun, 1 Mar 2020 16:44:06 -0800 Subject: [PATCH 29/69] cmd/compile: make pre-elimination of rulegen bounds checks more precise In cases in which we had a named value whose args were all _, like this rule from ARM.rules: (MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x) We previously inserted _ = x.Args[1] even though it is unnecessary. This change eliminates this pointless bounds check. And in other cases, we now check bounds just as far as strictly necessary. No significant movement on any compiler metrics. Just nicer (and less) code. Passes toolstash-check -all. 
Change-Id: I075dfe9f926cc561cdc705e9ddaab563164bed3a Reviewed-on: https://go-review.googlesource.com/c/go/+/221781 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/rulegen.go | 25 +- src/cmd/compile/internal/ssa/rewrite386.go | 15 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 82 ++---- src/cmd/compile/internal/ssa/rewriteARM.go | 92 ++----- src/cmd/compile/internal/ssa/rewriteARM64.go | 92 ++----- src/cmd/compile/internal/ssa/rewriteMIPS.go | 35 +-- src/cmd/compile/internal/ssa/rewriteMIPS64.go | 19 -- src/cmd/compile/internal/ssa/rewritePPC64.go | 30 +-- src/cmd/compile/internal/ssa/rewriteS390X.go | 233 ++++-------------- src/cmd/compile/internal/ssa/rewriteWasm.go | 12 - src/cmd/compile/internal/ssa/rewritedec.go | 5 - src/cmd/compile/internal/ssa/rewritedec64.go | 13 - .../compile/internal/ssa/rewritegeneric.go | 162 ++---------- 13 files changed, 170 insertions(+), 645 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 759336fb2b..8e88d0b6a3 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -1001,16 +1001,21 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, } } - // Access last argument first to minimize bounds checks. - if n := len(args); n > 1 && !pregenTop { - a := args[n-1] - if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) { - rr.add(declf(a, "%s.Args[%d]", v, n-1)) - - // delete the last argument so it is not reprocessed - args = args[:n-1] - } else { - rr.add(stmtf("_ = %s.Args[%d]", v, n-1)) + if !pregenTop { + // Access last argument first to minimize bounds checks. 
+ for n := len(args) - 1; n > 0; n-- { + a := args[n] + if a == "_" { + continue + } + if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) { + rr.add(declf(a, "%s.Args[%d]", v, n)) + // delete the last argument so it is not reprocessed + args = args[:n] + } else { + rr.add(stmtf("_ = %s.Args[%d]", v, n)) + } + break } } if commutative && !pregenTop { diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index b83c65da86..8b2da94c13 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -4087,9 +4087,8 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -4203,9 +4202,8 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -5109,9 +5107,8 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -7523,9 +7520,8 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -7663,9 +7659,8 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a0d422b372..c37bae2c22 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1858,11 +1858,10 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDL) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) @@ -2479,11 +2478,10 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDQ) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) @@ -2634,11 +2632,10 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDSD) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) @@ -2738,11 +2735,10 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ADDSS) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) @@ -3063,11 +3059,10 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ANDL) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) @@ -3450,11 +3445,10 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt 
!= off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ANDQ) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) @@ -9766,9 +9760,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -10019,9 +10012,8 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -12046,9 +12038,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -12353,9 +12344,8 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -12554,11 +12544,10 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVLf2i) v.AddArg(val) return true @@ -14721,7 +14710,6 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { } srcOff := v_1.AuxInt srcSym := v_1.Aux - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpSB { break @@ -14863,9 +14851,8 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && 
isSamePtr(ptr, ptr2)) { break } @@ -15040,11 +15027,10 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVQf2i) v.AddArg(val) return true @@ -16627,11 +16613,10 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVQi2f) v.AddArg(val) return true @@ -17237,11 +17222,10 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpAMD64MOVLi2f) v.AddArg(val) return true @@ -17847,9 +17831,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -18066,9 +18049,8 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -20680,11 +20662,10 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64MULSD) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) @@ -20784,11 +20765,10 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y 
:= v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64MULSS) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) @@ -22560,11 +22540,10 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ORL) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) @@ -24304,11 +24283,10 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64ORQ) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) @@ -30779,11 +30757,10 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBL) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) @@ -31059,11 +31036,10 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBQ) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) @@ -31211,11 +31187,10 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBSD) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) @@ -31312,11 +31287,10 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != 
sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64SUBSS) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) @@ -32238,11 +32212,10 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64XORL) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) @@ -32603,11 +32576,10 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] + y := v_2.Args[1] if ptr != v_2.Args[0] { break } - y := v_2.Args[1] v.reset(OpAMD64XORQ) v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 52ab522434..91ef5fe14f 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -5120,9 +5120,8 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -5179,13 +5178,9 @@ func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { if v_2.Op != OpARMMOVBstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVBUreg) @@ -5231,7 +5226,6 @@ func rewriteValueARM_OpARMMOVBUreg(v *Value) bool { if x.Op != OpARMMOVBUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -5344,9 +5338,8 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux 
- _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -5390,13 +5383,9 @@ func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { if v_2.Op != OpARMMOVBstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVBreg) @@ -5442,7 +5431,6 @@ func rewriteValueARM_OpARMMOVBreg(v *Value) bool { if x.Op != OpARMMOVBload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -5751,9 +5739,8 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -5898,9 +5885,8 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6047,9 +6033,8 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6106,13 +6091,9 @@ func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { if v_2.Op != OpARMMOVHstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVHUreg) @@ -6158,7 +6139,6 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value) bool { if x.Op != OpARMMOVBUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6170,7 +6150,6 @@ func 
rewriteValueARM_OpARMMOVHUreg(v *Value) bool { if x.Op != OpARMMOVHUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6294,9 +6273,8 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6340,13 +6318,9 @@ func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool { if v_2.Op != OpARMMOVHstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.reset(OpARMMOVHreg) @@ -6392,7 +6366,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { if x.Op != OpARMMOVBload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6404,7 +6377,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { if x.Op != OpARMMOVBUload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6416,7 +6388,6 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { if x.Op != OpARMMOVHload { break } - _ = x.Args[1] v.reset(OpARMMOVWreg) v.AddArg(x) return true @@ -6715,9 +6686,8 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6842,13 +6812,9 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { if v_2.Op != OpARMMOVWstoreidx { break } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { break } v.copyOf(x) @@ -6989,13 +6955,9 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool { break } d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := 
v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) { break } v.copyOf(x) @@ -7033,13 +6995,9 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool { break } d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) { break } v.copyOf(x) @@ -7077,13 +7035,9 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool { break } d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) { break } v.copyOf(x) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 99beedcea1..4d1ed50d9b 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -4118,11 +4118,10 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { if v_1.Op != OpARM64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVDgpfp) v.AddArg(val) return true @@ -4366,11 +4365,10 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { if v_1.Op != OpARM64MOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVSgpfp) v.AddArg(val) return true @@ -6838,7 +6836,6 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -6903,9 +6900,8 @@ 
func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { if v_2.Op != OpARM64MOVBstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -6924,7 +6920,6 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -6936,7 +6931,6 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -7103,7 +7097,6 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -7155,9 +7148,8 @@ func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { if v_2.Op != OpARM64MOVBstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -7176,7 +7168,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { if x.Op != OpARM64MOVBload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -7188,7 +7179,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { if x.Op != OpARM64MOVBloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -9277,11 +9267,10 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { if v_1.Op != OpARM64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVDfpgp) v.AddArg(val) return true @@ -9379,7 +9368,6 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 
:= v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -9470,9 +9458,8 @@ func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { if v_2.Op != OpARM64MOVDstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -9509,9 +9496,8 @@ func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { if v_2.Op != OpARM64MOVDstorezeroidx8 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -10145,7 +10131,6 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -10255,9 +10240,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -10294,9 +10278,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx2 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -10315,7 +10298,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10327,7 +10309,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVHUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10339,7 +10320,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] 
v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10351,7 +10331,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10363,7 +10342,6 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10548,7 +10526,6 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -10645,9 +10622,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -10684,9 +10660,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool { if v_2.Op != OpARM64MOVHstorezeroidx2 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -10705,7 +10680,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10717,7 +10691,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10729,7 +10702,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVHload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10741,7 +10713,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10753,7 +10724,6 @@ func 
rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10765,7 +10735,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -10777,7 +10746,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12086,11 +12054,10 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { if v_1.Op != OpARM64FMOVSstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + val := v_1.Args[1] if ptr != v_1.Args[0] { break } - val := v_1.Args[1] v.reset(OpARM64FMOVSfpgp) v.AddArg(val) return true @@ -12188,7 +12155,6 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -12279,9 +12245,8 @@ func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -12318,9 +12283,8 @@ func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx4 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -12339,7 +12303,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12351,7 +12314,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVHUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) 
return true @@ -12363,7 +12325,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVWUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12375,7 +12336,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12387,7 +12347,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12399,7 +12358,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVWUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12411,7 +12369,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12423,7 +12380,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if x.Op != OpARM64MOVWUloadidx4 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12619,7 +12575,6 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break @@ -12697,9 +12652,8 @@ func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } @@ -12736,9 +12690,8 @@ func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool { if v_2.Op != OpARM64MOVWstorezeroidx4 { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } @@ -12757,7 +12710,6 @@ func 
rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12769,7 +12721,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12781,7 +12732,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12793,7 +12743,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHUload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12805,7 +12754,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVWload { break } - _ = x.Args[1] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12817,7 +12765,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12829,7 +12776,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVBUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12841,7 +12787,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12853,7 +12798,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12865,7 +12809,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVWloadidx { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12877,7 +12820,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12889,7 +12831,6 @@ 
func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVHUloadidx2 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true @@ -12901,7 +12842,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if x.Op != OpARM64MOVWloadidx4 { break } - _ = x.Args[2] v.reset(OpARM64MOVDreg) v.AddArg(x) return true diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 83bb92fc35..5815874db9 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -2416,9 +2416,8 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -2438,7 +2437,6 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { if x.Op != OpMIPSMOVBUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -2563,9 +2561,8 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -2585,7 +2582,6 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { if x.Op != OpMIPSMOVBload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -2916,9 +2912,8 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -3038,9 +3033,8 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -3160,9 +3154,8 @@ func 
rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -3182,7 +3175,6 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { if x.Op != OpMIPSMOVBUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3194,7 +3186,6 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { if x.Op != OpMIPSMOVHUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3330,9 +3321,8 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -3352,7 +3342,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { if x.Op != OpMIPSMOVBload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3364,7 +3353,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { if x.Op != OpMIPSMOVBUload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3376,7 +3364,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { if x.Op != OpMIPSMOVHload { break } - _ = x.Args[1] v.reset(OpMIPSMOVWreg) v.AddArg(x) return true @@ -3695,9 +3682,8 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { } off2 := v_1.AuxInt sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } @@ -6445,7 +6431,6 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { if v_0.Op != OpMIPSMULTU { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -6464,7 +6449,6 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { if v_0.Op != OpMIPSMULTU { break } - _ = v_0.Args[1] v_0_0 := 
v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -6638,7 +6622,6 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { if v_0.Op != OpMIPSMULTU { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -7269,7 +7252,6 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGT { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPSNE, cmp) return true } @@ -7284,7 +7266,6 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTU { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPSNE, cmp) return true } @@ -7545,7 +7526,6 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGT { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPSEQ, cmp) return true } @@ -7560,7 +7540,6 @@ func rewriteBlockMIPS(b *Block) bool { if cmp.Op != OpMIPSSGTU { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPSEQ, cmp) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index c8d72363b3..5136b1ca62 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -2606,7 +2606,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -2692,7 +2691,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool { if x.Op != OpMIPS64MOVBload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3193,7 +3191,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3205,7 +3202,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { if x.Op != OpMIPS64MOVHUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3302,7 +3298,6 @@ func 
rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if x.Op != OpMIPS64MOVBload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3314,7 +3309,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3326,7 +3320,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if x.Op != OpMIPS64MOVHload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3807,7 +3800,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3819,7 +3811,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if x.Op != OpMIPS64MOVHUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3831,7 +3822,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if x.Op != OpMIPS64MOVWUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3939,7 +3929,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVBload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3951,7 +3940,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVBUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3963,7 +3951,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVHload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3975,7 +3962,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVHUload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -3987,7 +3973,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if x.Op != OpMIPS64MOVWload { break } - _ = x.Args[1] v.reset(OpMIPS64MOVVreg) v.AddArg(x) return true @@ -7765,7 
+7750,6 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGT { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPS64NE, cmp) return true } @@ -7780,7 +7764,6 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTU { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPS64NE, cmp) return true } @@ -8023,7 +8006,6 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGT { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPS64EQ, cmp) return true } @@ -8038,7 +8020,6 @@ func rewriteBlockMIPS64(b *Block) bool { if cmp.Op != OpMIPS64SGTU { break } - _ = cmp.Args[1] b.resetWithControl(BlockMIPS64EQ, cmp) return true } diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 1bdd8ee23a..82fa1354f8 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -4125,7 +4125,6 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if x.Op != OpPPC64MOVBZload { continue } - _ = x.Args[1] v.reset(OpPPC64ANDconst) v.AuxInt = c & 0xFF v.AddArg(x) @@ -5003,11 +5002,10 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { if v_1.Op != OpPPC64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + x := v_1.Args[1] if ptr != v_1.Args[0] { break } - x := v_1.Args[1] v.reset(OpPPC64MTVSRD) v.AddArg(x) return true @@ -6359,7 +6357,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if x.Op != OpPPC64MOVBZload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -6370,7 +6367,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { if x.Op != OpPPC64MOVBZloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -7651,11 +7647,10 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { if v_1.Op != OpPPC64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] + x := v_1.Args[1] if ptr != v_1.Args[0] { break } - x := v_1.Args[1] v.reset(OpPPC64MFVSRD) v.AddArg(x) 
return true @@ -8284,7 +8279,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if y.Op != OpPPC64MOVHBRload { break } - _ = y.Args[1] v.copyOf(y) return true } @@ -8307,7 +8301,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVBZload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -8318,7 +8311,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVBZloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -8329,7 +8321,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVHZload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -8340,7 +8331,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { if x.Op != OpPPC64MOVHZloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -8666,7 +8656,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if x.Op != OpPPC64MOVHload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -8677,7 +8666,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { if x.Op != OpPPC64MOVHloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -9268,7 +9256,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64AND { break } - _ = y.Args[1] y_0 := y.Args[0] y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { @@ -9412,7 +9399,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVHBRload { break } - _ = y.Args[1] v.copyOf(y) return true } @@ -9423,7 +9409,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if y.Op != OpPPC64MOVWBRload { break } - _ = y.Args[1] v.copyOf(y) return true } @@ -9446,7 +9431,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVBZload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -9457,7 +9441,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVBZloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -9468,7 +9451,6 
@@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVHZload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -9479,7 +9461,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVHZloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -9490,7 +9471,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVWZload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -9501,7 +9481,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { if x.Op != OpPPC64MOVWZloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -9673,7 +9652,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if y.Op != OpPPC64AND { break } - _ = y.Args[1] y_0 := y.Args[0] y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { @@ -9846,7 +9824,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVHload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -9857,7 +9834,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVHloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } @@ -9868,7 +9844,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVWload { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -9879,7 +9854,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { if x.Op != OpPPC64MOVWloadidx { break } - _ = x.Args[2] v.copyOf(x) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 2ac8a6f05e..7c750574bc 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -5652,9 +5652,8 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -6154,9 
+6153,8 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -7245,9 +7243,8 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { if v_1.Op != OpS390XMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -7265,9 +7262,8 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { if v_1.Op != OpS390XFMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -7588,9 +7584,8 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { if v_1.Op != OpS390XFMOVSstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -8304,9 +8299,8 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { if v_1.Op != OpS390XMOVBstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -8570,11 +8564,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -8585,11 +8575,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } 
v.copyOf(x) @@ -8678,7 +8664,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { if x.Op != OpS390XLOCGR { break } - _ = x.Args[2] + _ = x.Args[1] x_0 := x.Args[0] if x_0.Op != OpS390XMOVDconst { break @@ -8725,9 +8711,8 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { if v_1.Op != OpS390XMOVBstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -8991,11 +8976,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBload { - break - } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -9006,11 +8987,7 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBloadidx { - break - } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -10018,9 +9995,8 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { if v_1.Op != OpS390XMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -10037,9 +10013,8 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { if v_1.Op != OpS390XFMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -10790,9 +10765,8 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { if v_1.Op != OpS390XMOVHstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -11041,11 +11015,7 @@ func 
rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -11056,11 +11026,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -11071,11 +11037,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } v.copyOf(x) @@ -11086,11 +11048,7 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { + if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } v.copyOf(x) @@ -11201,9 +11159,8 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { if v_1.Op != OpS390XMOVHstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -11452,11 +11409,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBload { - break - } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -11467,11 +11420,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBloadidx { - break - } - _ = 
x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -11482,11 +11431,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHload { - break - } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -11497,11 +11442,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHloadidx { - break - } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVHloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -11512,11 +11453,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -11527,11 +11464,7 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -12340,9 +12273,8 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { if v_1.Op != OpS390XMOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -12574,11 +12506,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() 
> 1) { break } v.copyOf(x) @@ -12589,11 +12517,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -12604,11 +12528,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } v.copyOf(x) @@ -12619,11 +12539,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { + if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } v.copyOf(x) @@ -12634,11 +12550,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 4) { + if x.Op != OpS390XMOVWZload || !(!x.Type.IsSigned() || x.Type.Size() > 4) { break } v.copyOf(x) @@ -12649,11 +12561,7 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 4) { + if x.Op != OpS390XMOVWZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 4) { break } v.copyOf(x) @@ -12749,9 +12657,8 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { if v_1.Op != OpS390XMOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } - _ = v_1.Args[2] - ptr2 := v_1.Args[0] x := v_1.Args[1] + ptr2 := v_1.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -12983,11 +12890,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x 
for { x := v_0 - if x.Op != OpS390XMOVBload { - break - } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -12998,11 +12901,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBloadidx { - break - } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -13013,11 +12912,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHload { - break - } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -13028,11 +12923,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHloadidx { - break - } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVHloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -13043,11 +12934,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWload { - break - } - _ = x.Args[1] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -13058,11 +12945,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVWloadidx { - break - } - _ = x.Args[2] - if !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVWloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) @@ -13073,11 +12956,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload { - break - } - _ = x.Args[1] - if 
!(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -13088,11 +12967,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -13103,11 +12978,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZload { - break - } - _ = x.Args[1] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } v.copyOf(x) @@ -13118,11 +12989,7 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVHZloadidx { - break - } - _ = x.Args[2] - if !(!x.Type.IsSigned() || x.Type.Size() > 2) { + if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { break } v.copyOf(x) @@ -13873,9 +13740,8 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -16511,9 +16377,8 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -17949,9 +17814,8 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } @@ -18397,9 +18261,8 @@ func rewriteValueS390X_OpS390XXORload(v 
*Value) bool { if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] y := v_2.Args[1] + ptr2 := v_2.Args[0] if !(isSamePtr(ptr1, ptr2)) { break } diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index 4b100b6c32..2c7add4996 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -3096,7 +3096,6 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { if x.Op != OpWasmI64Load16S { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -3138,7 +3137,6 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { if x.Op != OpWasmI64Load16S { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -3180,7 +3178,6 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { if x.Op != OpWasmI64Load32S { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -3222,7 +3219,6 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { if x.Op != OpWasmI64Load8S { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -3264,7 +3260,6 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { if x.Op != OpWasmI64Load8S { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -3306,7 +3301,6 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { if x.Op != OpWasmI64Load8S { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -4591,7 +4585,6 @@ func rewriteValueWasm_OpZeroExt16to32(v *Value) bool { if x.Op != OpWasmI64Load16U { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -4617,7 +4610,6 @@ func rewriteValueWasm_OpZeroExt16to64(v *Value) bool { if x.Op != OpWasmI64Load16U { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -4643,7 +4635,6 @@ func rewriteValueWasm_OpZeroExt32to64(v *Value) bool { if x.Op != OpWasmI64Load32U { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -4669,7 +4660,6 @@ func rewriteValueWasm_OpZeroExt8to16(v *Value) bool { if x.Op != OpWasmI64Load8U { break } - _ = x.Args[1] v.copyOf(x) 
return true } @@ -4695,7 +4685,6 @@ func rewriteValueWasm_OpZeroExt8to32(v *Value) bool { if x.Op != OpWasmI64Load8U { break } - _ = x.Args[1] v.copyOf(x) return true } @@ -4721,7 +4710,6 @@ func rewriteValueWasm_OpZeroExt8to64(v *Value) bool { if x.Op != OpWasmI64Load8U { break } - _ = x.Args[1] v.copyOf(x) return true } diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index 08ed1fd129..1d7979f5c9 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -54,7 +54,6 @@ func rewriteValuedec_OpComplexReal(v *Value) bool { if v_0.Op != OpComplexMake { break } - _ = v_0.Args[1] real := v_0.Args[0] v.copyOf(real) return true @@ -83,7 +82,6 @@ func rewriteValuedec_OpITab(v *Value) bool { if v_0.Op != OpIMake { break } - _ = v_0.Args[1] itab := v_0.Args[0] v.copyOf(itab) return true @@ -230,7 +228,6 @@ func rewriteValuedec_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] len := v_0.Args[1] v.copyOf(len) return true @@ -245,7 +242,6 @@ func rewriteValuedec_OpSlicePtr(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] ptr := v_0.Args[0] v.copyOf(ptr) return true @@ -405,7 +401,6 @@ func rewriteValuedec_OpStringPtr(v *Value) bool { if v_0.Op != OpStringMake { break } - _ = v_0.Args[1] ptr := v_0.Args[0] v.copyOf(ptr) return true diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 08a045ccac..b7048f111c 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -439,7 +439,6 @@ func rewriteValuedec64_OpInt64Hi(v *Value) bool { if v_0.Op != OpInt64Make { break } - _ = v_0.Args[1] hi := v_0.Args[0] v.copyOf(hi) return true @@ -704,7 +703,6 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -770,7 +768,6 @@ 
func rewriteValuedec64_OpLsh32x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -916,7 +913,6 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1022,7 +1018,6 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1204,7 +1199,6 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1271,7 +1265,6 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1339,7 +1332,6 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1406,7 +1398,6 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1552,7 +1543,6 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1757,7 +1747,6 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1881,7 +1870,6 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break @@ -1948,7 +1936,6 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { if v_1.Op != OpInt64Make { break } - _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpConst32 { break diff --git 
a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index e0541f8710..0a4879a8ad 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -6886,12 +6886,10 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } a := v_0.Aux - _ = v_0.Args[1] if v_1.Op != OpLocalAddr { continue } b := v_1.Aux - _ = v_1.Args[1] v.reset(OpConstBool) v.AuxInt = b2i(a == b) return true @@ -6906,7 +6904,6 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } a := v_0.Aux - _ = v_0.Args[1] if v_1.Op != OpOffPtr { continue } @@ -6916,7 +6913,6 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } b := v_1_0.Aux - _ = v_1_0.Args[1] v.reset(OpConstBool) v.AuxInt = b2i(a == b && o == 0) return true @@ -6936,7 +6932,6 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } a := v_0_0.Aux - _ = v_0_0.Args[1] if v_1.Op != OpOffPtr { continue } @@ -6946,7 +6941,6 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } b := v_1_0.Aux - _ = v_1_0.Args[1] v.reset(OpConstBool) v.AuxInt = b2i(a == b && o1 == o2) return true @@ -7037,11 +7031,7 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { // result: (ConstBool [0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLocalAddr { - continue - } - _ = v_0.Args[1] - if v_1.Op != OpAddr { + if v_0.Op != OpLocalAddr || v_1.Op != OpAddr { continue } v.reset(OpConstBool) @@ -7058,11 +7048,7 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - continue - } - _ = v_0_0.Args[1] - if v_1.Op != OpAddr { + if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr { continue } v.reset(OpConstBool) @@ -7075,11 +7061,7 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { // result: (ConstBool [0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLocalAddr { - continue - } - _ = v_0.Args[1] - if v_1.Op 
!= OpOffPtr { + if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { continue } v_1_0 := v_1.Args[0] @@ -7100,11 +7082,7 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { continue } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - continue - } - _ = v_0_0.Args[1] - if v_1.Op != OpOffPtr { + if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { continue } v_1_0 := v_1.Args[0] @@ -7328,7 +7306,6 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { if v_0.Op != OpLoad { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] if v_0_0.Op != OpOffPtr { break @@ -7342,7 +7319,6 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { if v_0_0_0_0.Op != OpIMake { break } - _ = v_0_0_0_0.Args[1] v_0_0_0_0_0 := v_0_0_0_0.Args[0] if v_0_0_0_0_0.Op != OpAddr { break @@ -7445,7 +7421,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0.Op != OpAnd8 { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -7477,7 +7452,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0_0.Op != OpAnd8 { break } - _ = v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] v_0_0_1 := v_0_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { @@ -7509,7 +7483,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0_0.Op != OpAnd8 { break } - _ = v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] v_0_0_1 := v_0_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { @@ -7541,7 +7514,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0_0.Op != OpAnd8 { break } - _ = v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] v_0_0_1 := v_0_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { @@ -7569,7 +7541,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0.Op != OpAnd16 { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -7601,7 
+7572,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0_0.Op != OpAnd16 { break } - _ = v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] v_0_0_1 := v_0_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { @@ -7633,7 +7603,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0_0.Op != OpAnd16 { break } - _ = v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] v_0_0_1 := v_0_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { @@ -7661,7 +7630,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0.Op != OpAnd32 { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -7693,7 +7661,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0_0.Op != OpAnd32 { break } - _ = v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] v_0_0_1 := v_0_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { @@ -7721,7 +7688,6 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { if v_0.Op != OpAnd64 { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -8116,7 +8082,6 @@ func rewriteValuegeneric_OpIsNonNil(v *Value) bool { if v_0.Op != OpLocalAddr { break } - _ = v_0.Args[1] v.reset(OpConstBool) v.AuxInt = 1 return true @@ -8144,7 +8109,6 @@ func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool { if v_0.Op != OpAnd32 { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -8172,7 +8136,6 @@ func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool { if v_0.Op != OpAnd64 { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { @@ -8850,9 +8813,8 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] - p2 := 
v_1.Args[0] x := v_1.Args[1] + p2 := v_1.Args[0] if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2)) { break } @@ -8876,9 +8838,8 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t3 := v_1_2.Aux - _ = v_1_2.Args[2] - p3 := v_1_2.Args[0] x := v_1_2.Args[1] + p3 := v_1_2.Args[0] if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p3, sizeof(t3), p2, sizeof(t2))) { break } @@ -8909,9 +8870,8 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t4 := v_1_2_2.Aux - _ = v_1_2_2.Args[2] - p4 := v_1_2_2.Args[0] x := v_1_2_2.Args[1] + p4 := v_1_2_2.Args[0] if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p4, sizeof(t4), p2, sizeof(t2)) && disjoint(p4, sizeof(t4), p3, sizeof(t3))) { break } @@ -8949,9 +8909,8 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t5 := v_1_2_2_2.Aux - _ = v_1_2_2_2.Args[2] - p5 := v_1_2_2_2.Args[0] x := v_1_2_2_2.Args[1] + p5 := v_1_2_2_2.Args[0] if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p5, sizeof(t5), p2, sizeof(t2)) && disjoint(p5, sizeof(t5), p3, sizeof(t3)) && disjoint(p5, sizeof(t5), p4, sizeof(t4))) { break } @@ -8968,7 +8927,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst64 { @@ -8992,7 +8951,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst32 { @@ -9016,7 +8975,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst64F { @@ -9040,7 +8999,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } t2 := v_1.Aux - _ = v_1.Args[2] + _ = v_1.Args[1] p2 := 
v_1.Args[0] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst32F { @@ -9076,7 +9035,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p3 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2))) { break @@ -9119,7 +9077,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p4 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3))) { break @@ -9169,7 +9126,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p5 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4))) { break @@ -9226,7 +9182,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p6 := mem.Args[0] if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4)) && disjoint(op, t1.Size(), p5, sizeof(t5))) { break @@ -9254,7 +9209,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) { break @@ -9277,7 +9231,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) { break @@ -9300,7 +9253,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) { break @@ -9323,7 +9275,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n 
:= v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) { break @@ -9346,7 +9297,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) { break @@ -9369,7 +9319,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) { break @@ -9392,7 +9341,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } n := v_1.AuxInt - _ = v_1.Args[1] p2 := v_1.Args[0] if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) { break @@ -11082,7 +11030,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { if mem.Op != OpZero || mem.AuxInt != n || mem.Aux != t { break } - _ = mem.Args[1] dst2 := mem.Args[0] if !(isSamePtr(src, dst2)) { break @@ -11109,7 +11056,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { if mem_0.Op != OpZero || mem_0.AuxInt != n || mem_0.Aux != t { break } - _ = mem_0.Args[1] dst0 := mem_0.Args[0] if !(isSamePtr(src, dst0)) { break @@ -11284,7 +11230,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_2.Aux - _ = mem_2.Args[2] + d2 := mem_2.Args[1] op3 := mem_2.Args[0] if op3.Op != OpOffPtr { break @@ -11294,7 +11240,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p3 := op3.Args[0] - d2 := mem_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)) { break } @@ -11353,7 +11298,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_2_2.Aux - _ = mem_2_2.Args[2] + d3 := mem_2_2.Args[1] op4 := mem_2_2.Args[0] if op4.Op != OpOffPtr { break @@ -11363,7 +11308,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p4 := op4.Args[0] - d3 := mem_2_2.Args[1] if !(isSamePtr(p1, p2) && 
isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)) { break } @@ -11442,7 +11386,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_2_2_2.Aux - _ = mem_2_2_2.Args[2] + d4 := mem_2_2_2.Args[1] op5 := mem_2_2_2.Args[0] if op5.Op != OpOffPtr { break @@ -11452,7 +11396,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p5 := op5.Args[0] - d4 := mem_2_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)+sizeof(t5)) { break } @@ -11513,7 +11456,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_0_2.Aux - _ = mem_0_2.Args[2] + d2 := mem_0_2.Args[1] op3 := mem_0_2.Args[0] if op3.Op != OpOffPtr { break @@ -11523,7 +11466,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p3 := op3.Args[0] - d2 := mem_0_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)) { break } @@ -11586,7 +11528,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_0_2_2.Aux - _ = mem_0_2_2.Args[2] + d3 := mem_0_2_2.Args[1] op4 := mem_0_2_2.Args[0] if op4.Op != OpOffPtr { break @@ -11596,7 +11538,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p4 := op4.Args[0] - d3 := mem_0_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && 
alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)) { break } @@ -11679,7 +11620,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_0_2_2_2.Aux - _ = mem_0_2_2_2.Args[2] + d4 := mem_0_2_2_2.Args[1] op5 := mem_0_2_2_2.Args[0] if op5.Op != OpOffPtr { break @@ -11689,7 +11630,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } p5 := op5.Args[0] - d4 := mem_0_2_2_2.Args[1] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)+sizeof(t5)) { break } @@ -11746,7 +11686,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_2.Aux - _ = mem_2.Args[1] p3 := mem_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2+sizeof(t2)) { break @@ -11804,7 +11743,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_2_2.Aux - _ = mem_2_2.Args[1] p4 := mem_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3)) { break @@ -11882,7 +11820,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_2_2_2.Aux - _ = mem_2_2_2.Args[1] p5 := mem_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && 
alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4)) { break @@ -11980,7 +11917,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t6 := mem_2_2_2_2.Aux - _ = mem_2_2_2_2.Args[1] p6 := mem_2_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4) && n >= o5+sizeof(t5)) { break @@ -12046,7 +11982,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t3 := mem_0_2.Aux - _ = mem_0_2.Args[1] p3 := mem_0_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2+sizeof(t2)) { break @@ -12108,7 +12043,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t4 := mem_0_2_2.Aux - _ = mem_0_2_2.Args[1] p4 := mem_0_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3)) { break @@ -12190,7 +12124,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t5 := mem_0_2_2_2.Aux - _ = mem_0_2_2_2.Args[1] p5 := mem_0_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= 
o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4)) { break @@ -12292,7 +12225,6 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t6 := mem_0_2_2_2_2.Aux - _ = mem_0_2_2_2_2.Args[1] p6 := mem_0_2_2_2_2.Args[0] if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4) && n >= o5+sizeof(t5)) { break @@ -12340,9 +12272,8 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t2 := midmem.Aux - _ = midmem.Args[2] - tmp2 := midmem.Args[0] src := midmem.Args[1] + tmp2 := midmem.Args[0] if !(t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { break } @@ -12369,9 +12300,8 @@ func rewriteValuegeneric_OpMove(v *Value) bool { break } t2 := midmem_0.Aux - _ = midmem_0.Args[2] - tmp2 := midmem_0.Args[0] src := midmem_0.Args[1] + tmp2 := midmem_0.Args[0] if !(t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { break } @@ -14378,12 +14308,10 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } a := v_0.Aux - _ = v_0.Args[1] if v_1.Op != OpLocalAddr { continue } b := v_1.Aux - _ = v_1.Args[1] v.reset(OpConstBool) v.AuxInt = b2i(a != b) return true @@ -14398,7 +14326,6 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } a := v_0.Aux - _ = v_0.Args[1] if v_1.Op != OpOffPtr { continue } @@ -14408,7 +14335,6 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } b 
:= v_1_0.Aux - _ = v_1_0.Args[1] v.reset(OpConstBool) v.AuxInt = b2i(a != b || o != 0) return true @@ -14428,7 +14354,6 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } a := v_0_0.Aux - _ = v_0_0.Args[1] if v_1.Op != OpOffPtr { continue } @@ -14438,7 +14363,6 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } b := v_1_0.Aux - _ = v_1_0.Args[1] v.reset(OpConstBool) v.AuxInt = b2i(a != b || o1 != o2) return true @@ -14529,11 +14453,7 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { // result: (ConstBool [1]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLocalAddr { - continue - } - _ = v_0.Args[1] - if v_1.Op != OpAddr { + if v_0.Op != OpLocalAddr || v_1.Op != OpAddr { continue } v.reset(OpConstBool) @@ -14550,11 +14470,7 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - continue - } - _ = v_0_0.Args[1] - if v_1.Op != OpAddr { + if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr { continue } v.reset(OpConstBool) @@ -14567,11 +14483,7 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { // result: (ConstBool [1]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLocalAddr { - continue - } - _ = v_0.Args[1] - if v_1.Op != OpOffPtr { + if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { continue } v_1_0 := v_1.Args[0] @@ -14592,11 +14504,7 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { continue } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - continue - } - _ = v_0_0.Args[1] - if v_1.Op != OpOffPtr { + if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { continue } v_1_0 := v_1.Args[0] @@ -16290,7 +16198,6 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { if v_0.Op != OpRsh16x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -16725,7 +16632,6 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { if v_0.Op != OpRsh32x64 { break } - _ = v_0.Args[1] x := 
v_0.Args[0] if v_1.Op != OpConst64 { break @@ -17196,7 +17102,6 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { if v_0.Op != OpRsh64x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -17703,7 +17608,6 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { if v_0.Op != OpRsh8x64 { break } - _ = v_0.Args[1] x := v_0.Args[0] if v_1.Op != OpConst64 { break @@ -18298,7 +18202,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] + _ = v_0.Args[1] v_0_1 := v_0.Args[1] if v_0_1.Op != OpConst64 { break @@ -18316,7 +18220,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] + _ = v_0.Args[1] v_0_1 := v_0.Args[1] if v_0_1.Op != OpConst32 { break @@ -18334,7 +18238,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] + _ = v_0.Args[1] v_0_1 := v_0.Args[1] if v_0_1.Op != OpSliceLen { break @@ -18354,7 +18258,6 @@ func rewriteValuegeneric_OpSlicePtr(v *Value) bool { if v_0.Op != OpSliceMake { break } - _ = v_0.Args[2] v_0_0 := v_0.Args[0] if v_0_0.Op != OpSlicePtr { break @@ -18664,7 +18567,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem.AuxInt - _ = mem.Args[1] p2 := mem.Args[0] if !(isConstZero(x) && o >= 0 && sizeof(t)+o <= n && isSamePtr(p1, p2)) { break @@ -18696,7 +18598,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem_2.AuxInt - _ = mem_2.Args[1] p3 := mem_2.Args[0] if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p3) && disjoint(op, sizeof(t1), p2, sizeof(t2))) { break @@ -18735,7 +18636,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem_2_2.AuxInt - _ = mem_2_2.Args[1] p4 := mem_2_2.Args[0] if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p4) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3))) { break @@ -18781,7 
+18681,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } n := mem_2_2_2.AuxInt - _ = mem_2_2_2.Args[1] p5 := mem_2_2_2.Args[0] if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p5) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3)) && disjoint(op, sizeof(t1), p4, sizeof(t4))) { break @@ -19416,7 +19315,6 @@ func rewriteValuegeneric_OpStringPtr(v *Value) bool { if v_0.Op != OpStringMake { break } - _ = v_0.Args[1] v_0_0 := v_0.Args[0] if v_0_0.Op != OpAddr { break @@ -19452,7 +19350,6 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 0 || v_0.Op != OpStructMake2 { break } - _ = v_0.Args[1] x := v_0.Args[0] v.copyOf(x) return true @@ -19473,7 +19370,6 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 0 || v_0.Op != OpStructMake3 { break } - _ = v_0.Args[2] x := v_0.Args[0] v.copyOf(x) return true @@ -19484,7 +19380,6 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 1 || v_0.Op != OpStructMake3 { break } - _ = v_0.Args[2] x := v_0.Args[1] v.copyOf(x) return true @@ -19505,7 +19400,6 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 0 || v_0.Op != OpStructMake4 { break } - _ = v_0.Args[3] x := v_0.Args[0] v.copyOf(x) return true @@ -19516,7 +19410,6 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 1 || v_0.Op != OpStructMake4 { break } - _ = v_0.Args[3] x := v_0.Args[1] v.copyOf(x) return true @@ -19527,7 +19420,6 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { if v.AuxInt != 2 || v_0.Op != OpStructMake4 { break } - _ = v_0.Args[3] x := v_0.Args[2] v.copyOf(x) return true From 63f1bc59922d454f288ad3d193bc60d7c980dbb0 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 28 Feb 2020 12:59:38 -0800 Subject: [PATCH 30/69] runtime: print instruction bytes when reporting a SIGILL Print the bytes of the instruction that generated a SIGILL. 
This should help us respond to bug reports without having to go back-and-forth with the reporter to get the instruction involved. Might also help with SIGILL problems that are difficult to reproduce. Update #37513 Change-Id: I33059b1dbfc97bce16142a843f32a88a6547e280 Reviewed-on: https://go-review.googlesource.com/c/go/+/221431 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/runtime/signal_unix.go | 24 +++++++++++++++++ test/fixedbugs/issue37513.dir/main.go | 27 ++++++++++++++++++++ test/fixedbugs/issue37513.dir/sigill_amd64.s | 7 +++++ test/fixedbugs/issue37513.go | 9 +++++++ 4 files changed, 67 insertions(+) create mode 100644 test/fixedbugs/issue37513.dir/main.go create mode 100644 test/fixedbugs/issue37513.dir/sigill_amd64.s create mode 100644 test/fixedbugs/issue37513.go diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index d2e6693805..32b192c977 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -607,6 +607,30 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { print("signal arrived during cgo execution\n") gp = _g_.m.lockedg.ptr() } + if sig == _SIGILL { + // It would be nice to know how long the instruction is. + // Unfortunately, that's complicated to do in general (mostly for x86 + // and s930x, but other archs have non-standard instruction lengths also). + // Opt to print 16 bytes, which covers most instructions. + const maxN = 16 + n := uintptr(maxN) + // We have to be careful, though. If we're near the end of + // a page and the following page isn't mapped, we could + // segfault. So make sure we don't straddle a page (even though + // that could lead to printing an incomplete instruction). + // We're assuming here we can read at least the page containing the PC. + // I suppose it is possible that the page is mapped executable but not readable? 
+ pc := c.sigpc() + if n > physPageSize-pc%physPageSize { + n = physPageSize - pc%physPageSize + } + print("instruction bytes:") + b := (*[maxN]byte)(unsafe.Pointer(pc)) + for i := uintptr(0); i < n; i++ { + print(" ", hex(b[i])) + } + println() + } print("\n") level, _, docrash := gotraceback() diff --git a/test/fixedbugs/issue37513.dir/main.go b/test/fixedbugs/issue37513.dir/main.go new file mode 100644 index 0000000000..75106521b6 --- /dev/null +++ b/test/fixedbugs/issue37513.dir/main.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "os/exec" +) + +func main() { + if len(os.Args) > 1 { + // Generate a SIGILL. + sigill() + return + } + // Run ourselves with an extra argument. That process should SIGILL. + out, _ := exec.Command(os.Args[0], "foo").CombinedOutput() + want := "instruction bytes: 0xf 0xb 0xc3" + if !bytes.Contains(out, []byte(want)) { + fmt.Printf("got:\n%s\nwant:\n%s\n", string(out), want) + } +} +func sigill() diff --git a/test/fixedbugs/issue37513.dir/sigill_amd64.s b/test/fixedbugs/issue37513.dir/sigill_amd64.s new file mode 100644 index 0000000000..43260c21ae --- /dev/null +++ b/test/fixedbugs/issue37513.dir/sigill_amd64.s @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT ·sigill(SB),0,$0-0 + UD2 // generates a SIGILL + RET diff --git a/test/fixedbugs/issue37513.go b/test/fixedbugs/issue37513.go new file mode 100644 index 0000000000..e05b2d861f --- /dev/null +++ b/test/fixedbugs/issue37513.go @@ -0,0 +1,9 @@ +// buildrundir + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux,amd64 darwin,amd64 linux,386 + +package ignored From 37fc092be1ffe4906f67eae1c4c7be40f00f89ed Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Thu, 6 Feb 2020 10:36:29 -0800 Subject: [PATCH 31/69] cmd/compile: remove duplicate ppc64 rules Const64 gets lowered to MOVDconst. Change rules using interior Const64 to use MOVDconst instead, to be less dependent on rule application order. As a result of doing this, some of the rules end up being exact duplicates; remove those. We had those exact duplicates because of the order dependency; ppc64 had no way to optimize away shifts by a constant if the initial lowering didn't catch it. Add those optimizations as well. The outcome is the same, but this makes the overall rules more robust. Change-Id: Iadd97a9fe73d52358d571d022ace145e506d160b Reviewed-on: https://go-review.googlesource.com/c/go/+/220877 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Lynn Boger --- src/cmd/compile/internal/ppc64/ssa.go | 2 +- src/cmd/compile/internal/ssa/gen/PPC64.rules | 64 +- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 11 +- src/cmd/compile/internal/ssa/opGen.go | 12 + src/cmd/compile/internal/ssa/rewritePPC64.go | 710 +++++++------------ 5 files changed, 303 insertions(+), 496 deletions(-) diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 591f3666e7..ce30c9ae37 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -1328,7 +1328,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString()) case ssa.OpPPC64InvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) - case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT: + case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear: v.Fatalf("Flag* ops should never make it to codegen %v", 
v.LongString()) case ssa.OpClobber: // TODO: implement for clobberdead experiment. Nop is ok for now. diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 961f833e90..f2b2b9b898 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -118,47 +118,22 @@ (ROTLW x (MOVDconst [c])) -> (ROTLWconst x [c&31]) (ROTL x (MOVDconst [c])) -> (ROTLconst x [c&63]) -(Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c]) -(Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c]) -(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c]) -(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c]) -(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c]) -(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c]) -(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c]) -(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c]) -(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c]) -(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLWconst x [c]) -(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c]) -(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c]) - -(Lsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c]) -(Rsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c]) -(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c]) -(Lsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c]) -(Rsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c]) -(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c]) -(Lsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c]) -(Rsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c]) -(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c]) -(Lsh8x32 x (Const64 [c])) && uint32(c) < 
8 -> (SLWconst x [c]) -(Rsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c]) -(Rsh8Ux32 x (Const64 [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c]) // large constant shifts -(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0]) -(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0]) -(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0]) -(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0]) -(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0]) -(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0]) -(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0]) -(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0]) +(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0]) +(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0]) +(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0]) +(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0]) +(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0]) +(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0]) +(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0]) +(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0]) // large constant signed right shift, we leave the sign bit -(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63]) -(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63]) -(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63]) -(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63]) +(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRADconst x [63]) +(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAWconst x [63]) +(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63]) +(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63]) // constant shifts (Lsh64x64 x (MOVDconst 
[c])) && uint64(c) < 64 -> (SLDconst x [c]) @@ -299,11 +274,13 @@ (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1]) (ORN x (MOVDconst [-1])) -> x -// Potentially useful optimizing rewrites. -// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet -// (ADDconstForCarry [k] c), K < 0 && (c >= 0 && k+c < 0) -> CarryClear -// (MaskIfNotCarry CarrySet) -> 0 -// (MaskIfNotCarry CarrySet) -> -1 +(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c)) + d >= 0) -> (FlagCarryClear) +(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c)) + d < 0 -> (FlagCarrySet) + +(MaskIfNotCarry (FlagCarrySet)) -> (MOVDconst [0]) +(MaskIfNotCarry (FlagCarryClear)) -> (MOVDconst [-1]) + +(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) -> (S(RAD|RAW|RD|RW|LD|LW)const [c] x) (Addr ...) -> (MOVDaddr ...) (LocalAddr {sym} base _) -> (MOVDaddr {sym} base) @@ -664,6 +641,9 @@ (AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d]) (OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d]) (XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d]) +(ORN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|^d]) +(ANDN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&^d]) +(NOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [^(c|d)]) // Discover consts (AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 6660b921ef..d0a22c1f20 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -205,7 +205,7 @@ func init() { {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry) - {name: "ADDconstForCarry", 
argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux + {name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint {name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1) {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits @@ -588,10 +588,11 @@ func init() { // These ops are for temporary use by rewrite rules. They // cannot appear in the generated assembly. - {name: "FlagEQ"}, // equal - {name: "FlagLT"}, // signed < or unsigned < - {name: "FlagGT"}, // signed > or unsigned > - + {name: "FlagEQ"}, // equal + {name: "FlagLT"}, // signed < or unsigned < + {name: "FlagGT"}, // signed > or unsigned > + {name: "FlagCarrySet"}, // carry flag set + {name: "FlagCarryClear"}, // carry flag clear } blocks := []blockData{ diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d9d38a8b80..fb887017cf 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1881,6 +1881,8 @@ const ( OpPPC64FlagEQ OpPPC64FlagLT OpPPC64FlagGT + OpPPC64FlagCarrySet + OpPPC64FlagCarryClear OpRISCV64ADD OpRISCV64ADDI @@ -24995,6 +24997,16 @@ var opcodeTable = [...]opInfo{ argLen: 0, reg: regInfo{}, }, + { + name: "FlagCarrySet", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagCarryClear", + argLen: 0, + reg: regInfo{}, + }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 82fa1354f8..fe15e71a3e 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -444,8 +444,12 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ADD(v) case OpPPC64ADDconst: return 
rewriteValuePPC64_OpPPC64ADDconst(v) + case OpPPC64ADDconstForCarry: + return rewriteValuePPC64_OpPPC64ADDconstForCarry(v) case OpPPC64AND: return rewriteValuePPC64_OpPPC64AND(v) + case OpPPC64ANDN: + return rewriteValuePPC64_OpPPC64ANDN(v) case OpPPC64ANDconst: return rewriteValuePPC64_OpPPC64ANDconst(v) case OpPPC64CMP: @@ -584,6 +588,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MTVSRD(v) case OpPPC64MaskIfNotCarry: return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v) + case OpPPC64NOR: + return rewriteValuePPC64_OpPPC64NOR(v) case OpPPC64NotEqual: return rewriteValuePPC64_OpPPC64NotEqual(v) case OpPPC64OR: @@ -596,6 +602,18 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ROTL(v) case OpPPC64ROTLW: return rewriteValuePPC64_OpPPC64ROTLW(v) + case OpPPC64SLD: + return rewriteValuePPC64_OpPPC64SLD(v) + case OpPPC64SLW: + return rewriteValuePPC64_OpPPC64SLW(v) + case OpPPC64SRAD: + return rewriteValuePPC64_OpPPC64SRAD(v) + case OpPPC64SRAW: + return rewriteValuePPC64_OpPPC64SRAW(v) + case OpPPC64SRD: + return rewriteValuePPC64_OpPPC64SRD(v) + case OpPPC64SRW: + return rewriteValuePPC64_OpPPC64SRW(v) case OpPPC64SUB: return rewriteValuePPC64_OpPPC64SUB(v) case OpPPC64XOR: @@ -2307,23 +2325,6 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh16x32 x (Const64 [c])) - // cond: uint32(c) < 16 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh16x32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SLWconst x [c]) @@ -2378,28 +2379,11 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SLWconst x [c]) - for { - x := v_0 - 
if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh16x64 _ (Const64 [c])) + // match: (Lsh16x64 _ (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -2536,23 +2520,6 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh32x32 x (Const64 [c])) - // cond: uint32(c) < 32 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh32x32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SLWconst x [c]) @@ -2607,28 +2574,11 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh32x64 _ (Const64 [c])) + // match: (Lsh32x64 _ (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -2804,23 +2754,6 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh64x32 x (Const64 [c])) - // cond: uint32(c) < 64 - // result: (SLDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh64x32 x (MOVDconst [c])) // cond: 
uint32(c) < 64 // result: (SLDconst x [c]) @@ -2875,28 +2808,11 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh64x64 x (Const64 [c])) - // cond: uint64(c) < 64 - // result: (SLDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpPPC64SLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh64x64 _ (Const64 [c])) + // match: (Lsh64x64 _ (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -3072,23 +2988,6 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh8x32 x (Const64 [c])) - // cond: uint32(c) < 8 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Lsh8x32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SLWconst x [c]) @@ -3143,28 +3042,11 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SLWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh8x64 _ (Const64 [c])) + // match: (Lsh8x64 _ (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -4018,6 +3900,40 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64ADDconstForCarry(v *Value) bool { + v_0 := v.Args[0] + // 
match: (ADDconstForCarry [c] (MOVDconst [d])) + // cond: int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c)) + d >= 0) + // result: (FlagCarryClear) + for { + c := v.AuxInt + if v_0.Op != OpPPC64MOVDconst { + break + } + d := v_0.AuxInt + if !(int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c))+d >= 0)) { + break + } + v.reset(OpPPC64FlagCarryClear) + return true + } + // match: (ADDconstForCarry [c] (MOVDconst [d])) + // cond: int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c)) + d < 0 + // result: (FlagCarrySet) + for { + c := v.AuxInt + if v_0.Op != OpPPC64MOVDconst { + break + } + d := v_0.AuxInt + if !(int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c))+d < 0) { + break + } + v.reset(OpPPC64FlagCarrySet) + return true + } + return false +} func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -4134,6 +4050,26 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c&^d]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + if v_1.Op != OpPPC64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c &^ d + return true + } + return false +} func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDconst [c] (ANDconst [d] x)) @@ -10200,6 +10136,49 @@ func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value) bool { v.AuxInt = -1 return true } + // match: (MaskIfNotCarry (FlagCarrySet)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagCarrySet { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MaskIfNotCarry (FlagCarryClear)) + // result: (MOVDconst [-1]) + for { + if v_0.Op != OpPPC64FlagCarryClear { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = -1 + return true + } + return 
false +} +func rewriteValuePPC64_OpPPC64NOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [^(c|d)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = ^(c | d) + return true + } + break + } return false } func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool { @@ -11733,6 +11712,21 @@ func rewriteValuePPC64_OpPPC64ORN(v *Value) bool { v.copyOf(x) return true } + // match: (ORN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c|^d]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + if v_1.Op != OpPPC64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c | ^d + return true + } return false } func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool { @@ -11809,6 +11803,114 @@ func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64SLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLD x (MOVDconst [c])) + // result: (SLDconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SLDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLW x (MOVDconst [c])) + // result: (SLWconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAD x (MOVDconst [c])) + // result: (SRADconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + 
v.reset(OpPPC64SRADconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRAW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAW x (MOVDconst [c])) + // result: (SRAWconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRD x (MOVDconst [c])) + // result: (SRDconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRW x (MOVDconst [c])) + // result: (SRWconst [c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValuePPC64_OpPPC64SUB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -12285,25 +12387,6 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16Ux32 x (Const64 [c])) - // cond: uint32(c) < 16 - // result: (SRWconst (ZeroExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16Ux32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) @@ -12364,30 +12447,11 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16Ux64 x (Const64 [c])) - // cond: uint64(c) < 16 - // 
result: (SRWconst (ZeroExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh16Ux64 _ (Const64 [c])) + // match: (Rsh16Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -12538,25 +12602,6 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16x32 x (Const64 [c])) - // cond: uint32(c) < 16 - // result: (SRAWconst (SignExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16x32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) @@ -12617,31 +12662,12 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SRAWconst (SignExt16to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh16x64 x (Const64 [c])) + // match: (Rsh16x64 x (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (SRAWconst (SignExt16to32 x) [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -12791,23 +12817,6 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { v_0 := v.Args[0] b := 
v.Block typ := &b.Func.Config.Types - // match: (Rsh32Ux32 x (Const64 [c])) - // cond: uint32(c) < 32 - // result: (SRWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh32Ux32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SRWconst x [c]) @@ -12862,28 +12871,11 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32Ux64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SRWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh32Ux64 _ (Const64 [c])) + // match: (Rsh32Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -13124,23 +13116,6 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32x32 x (Const64 [c])) - // cond: uint32(c) < 32 - // result: (SRAWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh32x32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SRAWconst x [c]) @@ -13195,29 +13170,12 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SRAWconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: 
(Rsh32x64 x (Const64 [c])) + // match: (Rsh32x64 x (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (SRAWconst x [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -13459,23 +13417,6 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64Ux32 x (Const64 [c])) - // cond: uint32(c) < 64 - // result: (SRDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SRDconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh64Ux32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SRDconst x [c]) @@ -13530,28 +13471,11 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64Ux64 x (Const64 [c])) - // cond: uint64(c) < 64 - // result: (SRDconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpPPC64SRDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh64Ux64 _ (Const64 [c])) + // match: (Rsh64Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -13792,23 +13716,6 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64x32 x (Const64 [c])) - // cond: uint32(c) < 64 - // result: (SRADconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SRADconst) - v.AuxInt = c - v.AddArg(x) - return true - } // match: (Rsh64x32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SRADconst x [c]) @@ -13863,29 +13770,12 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ 
:= &b.Func.Config.Types - // match: (Rsh64x64 x (Const64 [c])) - // cond: uint64(c) < 64 - // result: (SRADconst x [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpPPC64SRADconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh64x64 x (Const64 [c])) + // match: (Rsh64x64 x (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (SRADconst x [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -14131,25 +14021,6 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8Ux32 x (Const64 [c])) - // cond: uint32(c) < 8 - // result: (SRWconst (ZeroExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8Ux32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) @@ -14210,30 +14081,11 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8Ux64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRWconst (ZeroExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh8Ux64 _ (Const64 [c])) + // match: (Rsh8Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt @@ -14384,25 +14236,6 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (Rsh8x32 x (Const64 [c])) - // cond: uint32(c) < 8 - // result: (SRAWconst (SignExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8x32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) @@ -14463,31 +14296,12 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRAWconst (SignExt8to32 x) [c]) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh8x64 x (Const64 [c])) + // match: (Rsh8x64 x (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (SRAWconst (SignExt8to32 x) [63]) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt From 97a268624c9f2830133d2bdfae677f5d99ec82cb Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Fri, 24 Jan 2020 13:52:41 -0800 Subject: [PATCH 32/69] cmd/compile: add -d=ssa/check/seed=SEED This change adds the option to run the ssa checker with a random seed. The current system uses a completely fixed seed, which is good for reproducibility but bad for exploring the state space. Preserve what we have, but also provide a way for the caller to provide a seed. The caller can report the seed alongside any failures. 
Change-Id: I2676a8112d8260e6cac86d95d2e8db4d3221aeeb Reviewed-on: https://go-review.googlesource.com/c/go/+/216418 Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/compile.go | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 8551c0a54b..448b1cf814 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -35,7 +35,8 @@ func Compile(f *Func) { var rnd *rand.Rand if checkEnabled { - rnd = rand.New(rand.NewSource(int64(crc32.ChecksumIEEE(([]byte)(f.Name))))) + seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed) + rnd = rand.New(rand.NewSource(seed)) } // hook to print function & phase if panic happens @@ -199,7 +200,10 @@ func (p *pass) addDump(s string) { } // Run consistency checker between each phase -var checkEnabled = false +var ( + checkEnabled = false + checkRandSeed = 0 +) // Debug output var IntrinsicsDebug int @@ -253,7 +257,7 @@ where: ` + phasenames + ` - is one of: - on, off, debug, mem, time, test, stats, dump + on, off, debug, mem, time, test, stats, dump, seed - defaults to 1 @@ -271,6 +275,10 @@ Examples: -d=ssa/check/on enables checking after each phase + -d=ssa/check/seed=1234 +enables checking after each phase, using 1234 to seed the PRNG +used for value order randomization + -d=ssa/all/time enables time reporting for all phases @@ -294,6 +302,12 @@ commas. 
For example: debugPoset = checkEnabled return "" } + if phase == "check" && flag == "seed" { + checkEnabled = true + checkRandSeed = val + debugPoset = checkEnabled + return "" + } alltime := false allmem := false From ca3dd1d36b5aa2dd810d31ec425a32902ae50ba9 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 7 Feb 2020 15:26:19 -0800 Subject: [PATCH 33/69] go/types: fix method set computation When computing method sets, any struct field that "shadows" a method at a lower embedding level eliminates that method from the method set. Treat any field at a given level as a "collision" for any methods at lower embedding level. Method sets are not directly used by go/types (except for self- verification in debug mode); they are a functionality provided by go/types. Thus, the method sets that go/types is using were not affected by this bug. Fixes #37081. Change-Id: Ic1937e01891b3614a6f7965d4384aeb485f3fe3e Reviewed-on: https://go-review.googlesource.com/c/go/+/218617 Reviewed-by: Alan Donovan --- src/go/types/example_test.go | 11 +++++++++++ src/go/types/methodset.go | 20 +++++++++----------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/go/types/example_test.go b/src/go/types/example_test.go index b8fba7312a..3747f3b15a 100644 --- a/src/go/types/example_test.go +++ b/src/go/types/example_test.go @@ -120,6 +120,9 @@ import "fmt" type Celsius float64 func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) } func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) } + +type S struct { I; m int } +type I interface { m() byte } ` fset := token.NewFileSet() f, err := parser.ParseFile(fset, "celsius.go", input, 0) @@ -147,6 +150,11 @@ func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) } fmt.Println() } + // Print the method set of S. 
+ styp := pkg.Scope().Lookup("S").Type() + fmt.Printf("Method set of %s:\n", styp) + fmt.Println(types.NewMethodSet(styp)) + // Output: // Method set of temperature.Celsius: // method (temperature.Celsius) String() string @@ -154,6 +162,9 @@ func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) } // Method set of *temperature.Celsius: // method (*temperature.Celsius) SetF(f float64) // method (*temperature.Celsius) String() string + // + // Method set of temperature.S: + // MethodSet {} } // ExampleInfo prints various facts recorded by the type checker in a diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go index a236fe2ea8..aacbb0f82a 100644 --- a/src/go/types/methodset.go +++ b/src/go/types/methodset.go @@ -166,17 +166,15 @@ func NewMethodSet(T Type) *MethodSet { } } - // Multiple fields with matching names collide at this depth and shadow all - // entries further down; add them as collisions to base if no entries with - // matching names exist already. - for k, f := range fset { - if f == nil { - if _, found := base[k]; !found { - if base == nil { - base = make(methodSet) - } - base[k] = nil // collision + // Add all fields at this depth as collisions (since they will hide any + // method further down) to base if no entries with matching names exist + // already. + for k := range fset { + if _, found := base[k]; !found { + if base == nil { + base = make(methodSet) } + base[k] = nil // collision } } @@ -233,7 +231,7 @@ func (s fieldSet) add(f *Var, multiples bool) fieldSet { // A methodSet is a set of methods and name collisions. // A collision indicates that multiple methods with the -// same unique id appeared. +// same unique id, or a field with that id appeared. type methodSet map[string]*Selection // a nil entry indicates a name collision // Add adds all functions in list to the method set s. 
From 117297cf42c5fd96fc1392e600c8d62d3bba7c5f Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 7 Feb 2020 16:01:01 -0800 Subject: [PATCH 34/69] go/types: simplify method set computation After fixing #37081 we don't need to explicitly keep track of field collisions in the method set computation anymore; we only need to know which field (names) exists at each embedding level. Simplify the code by removing the dedicated fieldSet data type in favor of a simple string set. Follow-up on https://golang.org/cl/218617; separate CL to make it easier to identify a problem with these two changes, should there be one. Updates #37081. Change-Id: I5c259c63c75a148a42d5c3e1e4860e1ffe5631bd Reviewed-on: https://go-review.googlesource.com/c/go/+/218618 Reviewed-by: Alan Donovan --- src/go/types/methodset.go | 40 +++++++++------------------------------ 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go index aacbb0f82a..c34d732b7a 100644 --- a/src/go/types/methodset.go +++ b/src/go/types/methodset.go @@ -99,8 +99,8 @@ func NewMethodSet(T Type) *MethodSet { for len(current) > 0 { var next []embeddedType // embedded types found at current depth - // field and method sets at current depth, allocated lazily - var fset fieldSet + // field and method sets at current depth, indexed by names (Id's), and allocated lazily + var fset map[string]bool // we only care about the field names var mset methodSet for _, e := range current { @@ -131,7 +131,10 @@ func NewMethodSet(T Type) *MethodSet { switch t := typ.(type) { case *Struct: for i, f := range t.fields { - fset = fset.add(f, e.multiples) + if fset == nil { + fset = make(map[string]bool) + } + fset[f.Id()] = true // Embedded fields are always of the form T or *T where // T is a type name. 
If typ appeared multiple times at @@ -156,7 +159,7 @@ func NewMethodSet(T Type) *MethodSet { for k, m := range mset { if _, found := base[k]; !found { // Fields collide with methods of the same name at this depth. - if _, found := fset[k]; found { + if fset[k] { m = nil // collision } if base == nil { @@ -166,9 +169,8 @@ func NewMethodSet(T Type) *MethodSet { } } - // Add all fields at this depth as collisions (since they will hide any - // method further down) to base if no entries with matching names exist - // already. + // Add all (remaining) fields at this depth as collisions (since they will + // hide any method further down) if no entries with matching names exist already. for k := range fset { if _, found := base[k]; !found { if base == nil { @@ -205,30 +207,6 @@ func NewMethodSet(T Type) *MethodSet { return &MethodSet{list} } -// A fieldSet is a set of fields and name collisions. -// A collision indicates that multiple fields with the -// same unique id appeared. -type fieldSet map[string]*Var // a nil entry indicates a name collision - -// Add adds field f to the field set s. -// If multiples is set, f appears multiple times -// and is treated as a collision. -func (s fieldSet) add(f *Var, multiples bool) fieldSet { - if s == nil { - s = make(fieldSet) - } - key := f.Id() - // if f is not in the set, add it - if !multiples { - if _, found := s[key]; !found { - s[key] = f - return s - } - } - s[key] = nil // collision - return s -} - // A methodSet is a set of methods and name collisions. // A collision indicates that multiple methods with the // same unique id, or a field with that id appeared. From 4f989323a5571e29c27f5ef86154676bd889fe15 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 2 Mar 2020 13:57:17 -0800 Subject: [PATCH 35/69] cmd/compile: regenerate rules CL 210897 went in concurrently with some rulegen.go changes. Regenerate. 
Change-Id: I39ffa8bdffdfcc7f60cc8158d188fb1a3e70fcb6 Reviewed-on: https://go-review.googlesource.com/c/go/+/221787 Run-TryBot: Josh Bleecher Snyder Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/ssa/rewriteMIPS64.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 5136b1ca62..125c33d002 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -6909,8 +6909,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { v.reset(OpSelect1) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg(x) - v0.AddArg(y) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -7011,13 +7010,11 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.Type = typ.Bool v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) - v1.AddArg(x) - v1.AddArg(y) + v1.AddArg2(x, y) v0.AddArg(v1) - v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 0 - v.AddArg(v2) + v.AddArg2(v0, v2) return true } // match: (Select1 (MULVU x (MOVVconst [-1]))) From 9828c43288a53d3df75b1f73edad0d037a91dff8 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 27 Jan 2020 11:28:05 -0800 Subject: [PATCH 36/69] runtime: prevent allocation when converting small ints to interfaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior to this change, we avoid allocation when converting 0 to an interface. This change extends that optimization to larger value types whose values happens to be in the range 0 to 255. This is marginally more expensive in the case of a 0 value, in that the address is computed rather than fixed. 
name old time/op new time/op delta ConvT2ESmall-8 2.36ns ± 4% 2.65ns ± 4% +12.23% (p=0.000 n=87+91) ConvT2EUintptr-8 2.36ns ± 4% 2.84ns ± 6% +20.05% (p=0.000 n=96+99) ConvT2ELarge-8 23.8ns ± 2% 23.1ns ± 3% -2.94% (p=0.000 n=93+95) ConvT2ISmall-8 2.67ns ± 5% 2.74ns ±27% ~ (p=0.214 n=99+100) ConvT2IUintptr-8 2.65ns ± 5% 2.46ns ± 5% -7.19% (p=0.000 n=98+98) ConvT2ILarge-8 24.2ns ± 2% 23.5ns ± 4% -3.16% (p=0.000 n=91+97) ConvT2Ezero/zero/16-8 2.79ns ± 6% 2.99ns ± 4% +7.52% (p=0.000 n=94+88) ConvT2Ezero/zero/32-8 2.34ns ± 3% 2.65ns ± 3% +13.06% (p=0.000 n=92+98) ConvT2Ezero/zero/64-8 2.35ns ± 4% 2.65ns ± 6% +12.86% (p=0.000 n=99+94) ConvT2Ezero/zero/str-8 2.55ns ± 4% 2.54ns ± 4% ~ (p=0.063 n=97+99) ConvT2Ezero/zero/slice-8 2.82ns ± 4% 2.85ns ± 5% +1.00% (p=0.000 n=99+95) ConvT2Ezero/zero/big-8 94.3ns ± 5% 93.4ns ± 4% -0.94% (p=0.000 n=88+90) ConvT2Ezero/nonzero/str-8 29.6ns ± 3% 27.7ns ± 3% -6.69% (p=0.000 n=98+97) ConvT2Ezero/nonzero/slice-8 36.6ns ± 2% 37.1ns ± 2% +1.31% (p=0.000 n=94+90) ConvT2Ezero/nonzero/big-8 93.4ns ± 3% 92.7ns ± 3% -0.74% (p=0.000 n=88+84) ConvT2Ezero/smallint/16-8 13.3ns ± 4% 2.7ns ± 6% -79.82% (p=0.000 n=100+97) ConvT2Ezero/smallint/32-8 12.5ns ± 1% 2.9ns ± 5% -77.17% (p=0.000 n=85+96) ConvT2Ezero/smallint/64-8 14.7ns ± 3% 2.6ns ± 3% -82.05% (p=0.000 n=94+94) ConvT2Ezero/largeint/16-8 14.0ns ± 4% 13.2ns ± 7% -5.44% (p=0.000 n=95+99) ConvT2Ezero/largeint/32-8 12.8ns ± 4% 12.9ns ± 3% ~ (p=0.096 n=99+87) ConvT2Ezero/largeint/64-8 15.5ns ± 2% 15.0ns ± 2% -3.46% (p=0.000 n=95+96) An example of a program for which this makes a perceptible difference is running the compiler with the -S flag: name old time/op new time/op delta Template 349ms ± 2% 344ms ± 2% -1.48% (p=0.000 n=23+25) Unicode 138ms ± 4% 136ms ± 3% -1.67% (p=0.003 n=25+25) GoTypes 1.25s ± 2% 1.24s ± 2% -1.11% (p=0.001 n=24+25) Compiler 5.73s ± 2% 5.67s ± 2% -1.09% (p=0.002 n=25+24) SSA 20.2s ± 2% 19.9s ± 2% -1.45% (p=0.000 n=25+23) Flate 216ms ± 4% 210ms ± 2% -2.77% (p=0.000 n=25+24) 
GoParser 283ms ± 2% 278ms ± 3% -1.58% (p=0.000 n=23+23) Reflect 757ms ± 2% 745ms ± 2% -1.58% (p=0.000 n=25+25) Tar 303ms ± 4% 296ms ± 2% -2.20% (p=0.000 n=22+23) XML 415ms ± 2% 411ms ± 3% -0.94% (p=0.002 n=25+22) [Geo mean] 726ms 715ms -1.59% name old user-time/op new user-time/op delta Template 434ms ± 3% 427ms ± 2% -1.66% (p=0.000 n=23+24) Unicode 204ms ±12% 198ms ±12% -2.83% (p=0.032 n=25+25) GoTypes 1.59s ± 2% 1.56s ± 2% -1.64% (p=0.000 n=22+25) Compiler 7.50s ± 1% 7.40s ± 2% -1.32% (p=0.000 n=25+25) SSA 27.2s ± 2% 26.8s ± 2% -1.50% (p=0.000 n=24+23) Flate 266ms ± 6% 254ms ± 3% -4.38% (p=0.000 n=25+25) GoParser 357ms ± 2% 351ms ± 2% -1.90% (p=0.000 n=24+23) Reflect 966ms ± 2% 947ms ± 2% -1.94% (p=0.000 n=24+25) Tar 387ms ± 2% 380ms ± 3% -1.83% (p=0.000 n=22+24) XML 538ms ± 1% 532ms ± 1% -1.15% (p=0.000 n=24+20) [Geo mean] 942ms 923ms -2.02% name old alloc/op new alloc/op delta Template 54.1MB ± 0% 52.9MB ± 0% -2.26% (p=0.000 n=25+25) Unicode 33.5MB ± 0% 33.1MB ± 0% -1.03% (p=0.000 n=25+24) GoTypes 189MB ± 0% 185MB ± 0% -2.27% (p=0.000 n=25+25) Compiler 875MB ± 0% 858MB ± 0% -1.99% (p=0.000 n=23+25) SSA 3.19GB ± 0% 3.13GB ± 0% -1.95% (p=0.000 n=25+25) Flate 32.9MB ± 0% 32.2MB ± 0% -2.26% (p=0.000 n=25+25) GoParser 44.0MB ± 0% 42.9MB ± 0% -2.33% (p=0.000 n=25+25) Reflect 117MB ± 0% 114MB ± 0% -2.60% (p=0.000 n=25+25) Tar 48.6MB ± 0% 47.5MB ± 0% -2.18% (p=0.000 n=25+24) XML 65.7MB ± 0% 64.4MB ± 0% -1.96% (p=0.000 n=23+25) [Geo mean] 118MB 115MB -2.08% name old allocs/op new allocs/op delta Template 1.07M ± 0% 0.92M ± 0% -14.29% (p=0.000 n=25+25) Unicode 539k ± 0% 494k ± 0% -8.27% (p=0.000 n=25+25) GoTypes 3.97M ± 0% 3.43M ± 0% -13.71% (p=0.000 n=24+25) Compiler 17.6M ± 0% 15.4M ± 0% -12.69% (p=0.000 n=25+24) SSA 66.1M ± 0% 58.1M ± 0% -12.17% (p=0.000 n=25+25) Flate 629k ± 0% 536k ± 0% -14.73% (p=0.000 n=24+24) GoParser 929k ± 0% 799k ± 0% -13.96% (p=0.000 n=25+25) Reflect 2.49M ± 0% 2.11M ± 0% -15.28% (p=0.000 n=25+25) Tar 919k ± 0% 788k ± 0% -14.30% (p=0.000 
n=25+25) XML 1.28M ± 0% 1.11M ± 0% -12.85% (p=0.000 n=24+25) [Geo mean] 2.32M 2.01M -13.24% There is a slight increase in binary size from this change: file before after Δ % addr2line 4307728 4307760 +32 +0.001% api 5972680 5972728 +48 +0.001% asm 5114200 5114232 +32 +0.001% buildid 2843720 2847848 +4128 +0.145% cgo 4823736 4827864 +4128 +0.086% compile 24912056 24912104 +48 +0.000% cover 5259800 5259832 +32 +0.001% dist 3665080 3665128 +48 +0.001% doc 4672712 4672744 +32 +0.001% fix 3376952 3376984 +32 +0.001% link 6618008 6622152 +4144 +0.063% nm 4253280 4257424 +4144 +0.097% objdump 4655376 4659504 +4128 +0.089% pack 2294280 2294328 +48 +0.002% pprof 14747476 14751620 +4144 +0.028% test2json 2819320 2823448 +4128 +0.146% trace 11665068 11669212 +4144 +0.036% vet 8342360 8342408 +48 +0.001% Change-Id: I38ef70244e23069bfd14334061d43ae22a294519 Reviewed-on: https://go-review.googlesource.com/c/go/+/216401 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/runtime/iface.go | 54 ++++++++++++++++++++++++++---- src/runtime/iface_test.go | 70 +++++++++++++++++++++++++-------------- 2 files changed, 94 insertions(+), 30 deletions(-) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 05de282aa7..892e5a400f 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -331,8 +331,11 @@ func convT2E(t *_type, elem unsafe.Pointer) (e eface) { } func convT16(val uint16) (x unsafe.Pointer) { - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) + if val < uint16(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + if sys.BigEndian { + x = add(x, 6) + } } else { x = mallocgc(2, uint16Type, false) *(*uint16)(x) = val @@ -341,8 +344,11 @@ func convT16(val uint16) (x unsafe.Pointer) { } func convT32(val uint32) (x unsafe.Pointer) { - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) + if val < uint32(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) + if sys.BigEndian { + x = add(x, 4) + } } else { x = 
mallocgc(4, uint32Type, false) *(*uint32)(x) = val @@ -351,8 +357,8 @@ func convT32(val uint32) (x unsafe.Pointer) { } func convT64(val uint64) (x unsafe.Pointer) { - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) + if val < uint64(len(staticuint64s)) { + x = unsafe.Pointer(&staticuint64s[val]) } else { x = mallocgc(8, uint64Type, false) *(*uint64)(x) = val @@ -556,3 +562,39 @@ var staticbytes = [...]byte{ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, } + +// staticuint64s is used to avoid allocating in convTx for small integer values. +var staticuint64s = [...]uint64{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 
0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +} diff --git a/src/runtime/iface_test.go b/src/runtime/iface_test.go index 6d8f8614d9..73beebffe2 100644 --- a/src/runtime/iface_test.go +++ b/src/runtime/iface_test.go @@ -311,17 +311,20 @@ var ( eight8 uint8 = 8 eight8I T8 = 8 - zero16 uint16 = 0 - zero16I T16 = 0 - one16 uint16 = 1 + zero16 uint16 = 0 + zero16I T16 = 0 + one16 uint16 = 1 + thousand16 uint16 = 1000 - zero32 uint32 = 0 - zero32I T32 = 0 - one32 uint32 = 1 + zero32 uint32 = 0 + zero32I T32 = 0 + one32 uint32 = 1 + thousand32 uint32 = 1000 - zero64 uint64 = 0 - zero64I T64 = 0 - one64 uint64 = 1 + zero64 uint64 = 0 + zero64I T64 = 0 + one64 uint64 = 1 + thousand64 uint64 = 1000 zerostr string = "" zerostrI Tstr = "" @@ -369,21 +372,6 @@ func BenchmarkConvT2Ezero(b *testing.B) { }) }) b.Run("nonzero", func(b *testing.B) { - b.Run("16", func(b *testing.B) { - for i := 0; i < b.N; i++ { - e = one16 - } - }) - b.Run("32", func(b *testing.B) { - for i := 0; i < b.N; i++ { - e = one32 - } - }) - b.Run("64", func(b *testing.B) { - for i := 0; i < b.N; i++ { - e = one64 - } - }) b.Run("str", func(b *testing.B) { for i := 0; i < b.N; i++ { e = nzstr @@ -400,4 +388,38 @@ func BenchmarkConvT2Ezero(b *testing.B) { } }) }) + b.Run("smallint", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one16 + } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = one64 + } + }) + }) + b.Run("largeint", func(b *testing.B) { + b.Run("16", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand16 + } + }) + b.Run("32", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = thousand32 + } + }) + b.Run("64", func(b *testing.B) { + for i := 0; i 
< b.N; i++ { + e = thousand64 + } + }) + }) } From 2001685ec01c240eda84762a3bc612ddd3ca93fe Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 28 Feb 2020 22:25:39 -0800 Subject: [PATCH 37/69] cmd/compile/internal/syntax: add -skip flag to exclude files from TestStdLib TestStdLib reports parsed lines and lines/s information. To make it easier to compare apples to apples when making changes in the std lib, a regular expression provided via the -skip flag filters files we don't want to process. Change-Id: I27d9c32032eac4e78581205892e4f26947c91bd9 Reviewed-on: https://go-review.googlesource.com/c/go/+/221600 Reviewed-by: Emmanuel Odeke --- .../compile/internal/syntax/parser_test.go | 25 ++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go index 673339d667..81945faee9 100644 --- a/src/cmd/compile/internal/syntax/parser_test.go +++ b/src/cmd/compile/internal/syntax/parser_test.go @@ -10,6 +10,7 @@ import ( "fmt" "io/ioutil" "path/filepath" + "regexp" "runtime" "strings" "sync" @@ -17,9 +18,12 @@ import ( "time" ) -var fast = flag.Bool("fast", false, "parse package files in parallel") -var src_ = flag.String("src", "parser.go", "source file to parse") -var verify = flag.Bool("verify", false, "verify idempotent printing") +var ( + fast = flag.Bool("fast", false, "parse package files in parallel") + verify = flag.Bool("verify", false, "verify idempotent printing") + src_ = flag.String("src", "parser.go", "source file to parse") + skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib") +) func TestParse(t *testing.T) { ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0) @@ -30,6 +34,15 @@ func TestStdLib(t *testing.T) { t.Skip("skipping test in short mode") } + var skipRx *regexp.Regexp + if *skip != "" { + var err error + skipRx, err = regexp.Compile(*skip) + if err != nil { + 
t.Fatalf("invalid argument for -skip (%v)", err) + } + } + var m1 runtime.MemStats runtime.ReadMemStats(&m1) start := time.Now() @@ -46,6 +59,12 @@ func TestStdLib(t *testing.T) { runtime.GOROOT(), } { walkDirs(t, dir, func(filename string) { + if skipRx != nil && skipRx.MatchString(filename) { + // Always report skipped files since regexp + // typos can lead to surprising results. + fmt.Printf("skipping %s\n", filename) + return + } if debug { fmt.Printf("parsing %s\n", filename) } From 972df38445977cc04414c7b6f469e2a8e5a63861 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sun, 1 Mar 2020 17:41:44 -0800 Subject: [PATCH 38/69] runtime: during panic, print value instead of address, if kind is printable Make panics more useful by printing values, if their underlying kind is printable, instead of just their memory address. Thus now given any custom type derived from any of: float*, int*, string, uint* if we have panic with such a result, its value will be printed. Thus given any of: type MyComplex128 complex128 type MyFloat64 float64 type MyString string type MyUintptr uintptr panic(MyComplex128(32.1 + 10i)) panic(MyFloat64(-93.7)) panic(MyString("This one")) panic(MyUintptr(93)) They will now print in the panic: panic: main.MyComplex64(+1.100000e-001+3.000000e+000i) panic: main.MyFloat64(-9.370000e+001) panic: main.MyString("This one") panic: main.MyUintptr(93) instead of: panic: (main.MyComplex128) (0xe0100,0x138cc0) panic: (main.MyFloat64) (0xe0100,0x138068) panic: (main.MyString) (0x48aa00,0x4c0840) panic: (main.MyUintptr) (0xe0100,0x137e58) and anything else will be printed as in the past with: panic: (main.MyStruct) (0xe4ee0,0x40a0e0) Also while here, updated the Go1.15 release notes. 
Fixes #37531 Change-Id: Ia486424344a386014f2869ab3483e42a9ef48ac4 Reviewed-on: https://go-review.googlesource.com/c/go/+/221779 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- doc/go1.15.html | 12 +++ src/runtime/error.go | 51 +++++++-- src/runtime/panic_test.go | 48 +++++++++ src/runtime/testdata/testprog/panicprint.go | 111 ++++++++++++++++++++ 4 files changed, 216 insertions(+), 6 deletions(-) create mode 100644 src/runtime/panic_test.go create mode 100644 src/runtime/testdata/testprog/panicprint.go diff --git a/doc/go1.15.html b/doc/go1.15.html index 1eb159c318..9cc576e4be 100644 --- a/doc/go1.15.html +++ b/doc/go1.15.html @@ -92,6 +92,18 @@ TODO TODO

+
runtime
+
+

+ If panic is invoked with a value whose type is derived from any + of: bool, complex64, complex128, float32, float64, + int, int8, int16, int32, int64, string, + uint, uint8, uint16, uint32, uint64, uintptr, + then the value will be printed, instead of just its address. +

+
+
+
sync

diff --git a/src/runtime/error.go b/src/runtime/error.go index 555befa43d..386569bead 100644 --- a/src/runtime/error.go +++ b/src/runtime/error.go @@ -185,11 +185,6 @@ type stringer interface { String() string } -func typestring(x interface{}) string { - e := efaceOf(&x) - return e._type.string() -} - // printany prints an argument passed to panic. // If panic is called with a value that has a String or Error method, // it has already been converted into a string by preprintpanics. @@ -232,7 +227,51 @@ func printany(i interface{}) { case string: print(v) default: - print("(", typestring(i), ") ", i) + printanycustomtype(i) + } +} + +func printanycustomtype(i interface{}) { + eface := efaceOf(&i) + typestring := eface._type.string() + + switch eface._type.kind { + case kindString: + print(typestring, `("`, *(*string)(eface.data), `")`) + case kindBool: + print(typestring, "(", *(*bool)(eface.data), ")") + case kindInt: + print(typestring, "(", *(*int)(eface.data), ")") + case kindInt8: + print(typestring, "(", *(*int8)(eface.data), ")") + case kindInt16: + print(typestring, "(", *(*int16)(eface.data), ")") + case kindInt32: + print(typestring, "(", *(*int32)(eface.data), ")") + case kindInt64: + print(typestring, "(", *(*int64)(eface.data), ")") + case kindUint: + print(typestring, "(", *(*uint)(eface.data), ")") + case kindUint8: + print(typestring, "(", *(*uint8)(eface.data), ")") + case kindUint16: + print(typestring, "(", *(*uint16)(eface.data), ")") + case kindUint32: + print(typestring, "(", *(*uint32)(eface.data), ")") + case kindUint64: + print(typestring, "(", *(*uint64)(eface.data), ")") + case kindUintptr: + print(typestring, "(", *(*uintptr)(eface.data), ")") + case kindFloat32: + print(typestring, "(", *(*float32)(eface.data), ")") + case kindFloat64: + print(typestring, "(", *(*float64)(eface.data), ")") + case kindComplex64: + print(typestring, *(*complex64)(eface.data)) + case kindComplex128: + print(typestring, *(*complex128)(eface.data)) + default: 
+ print("(", typestring, ") ", eface.data) } } diff --git a/src/runtime/panic_test.go b/src/runtime/panic_test.go new file mode 100644 index 0000000000..45ffa9858b --- /dev/null +++ b/src/runtime/panic_test.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "strings" + "testing" +) + +// Test that panics print out the underlying value +// when the underlying kind is directly printable. +// Issue: https://golang/go/issues/37531 +func TestPanicWithDirectlyPrintableCustomTypes(t *testing.T) { + tests := []struct { + name string + wantPanicPrefix string + }{ + {"panicCustomBool", `panic: main.MyBool(true)`}, + {"panicCustomComplex128", `panic: main.MyComplex128(+3.210000e+001+1.000000e+001i)`}, + {"panicCustomComplex64", `panic: main.MyComplex64(+1.100000e-001+3.000000e+000i)`}, + {"panicCustomFloat32", `panic: main.MyFloat32(-9.370000e+001)`}, + {"panicCustomFloat64", `panic: main.MyFloat64(-9.370000e+001)`}, + {"panicCustomInt", `panic: main.MyInt(93)`}, + {"panicCustomInt8", `panic: main.MyInt8(93)`}, + {"panicCustomInt16", `panic: main.MyInt16(93)`}, + {"panicCustomInt32", `panic: main.MyInt32(93)`}, + {"panicCustomInt64", `panic: main.MyInt64(93)`}, + {"panicCustomString", `panic: main.MyString("Panic")`}, + {"panicCustomUint", `panic: main.MyUint(93)`}, + {"panicCustomUint8", `panic: main.MyUint8(93)`}, + {"panicCustomUint16", `panic: main.MyUint16(93)`}, + {"panicCustomUint32", `panic: main.MyUint32(93)`}, + {"panicCustomUint64", `panic: main.MyUint64(93)`}, + {"panicCustomUintptr", `panic: main.MyUintptr(93)`}, + } + + for _, tt := range tests { + t := t + t.Run(tt.name, func(t *testing.T) { + output := runTestProg(t, "testprog", tt.name) + if !strings.HasPrefix(output, tt.wantPanicPrefix) { + t.Fatalf("%q\nis not present in\n%s", tt.wantPanicPrefix, output) + } + }) + } +} diff --git 
a/src/runtime/testdata/testprog/panicprint.go b/src/runtime/testdata/testprog/panicprint.go new file mode 100644 index 0000000000..c8deabe2ab --- /dev/null +++ b/src/runtime/testdata/testprog/panicprint.go @@ -0,0 +1,111 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type MyBool bool +type MyComplex128 complex128 +type MyComplex64 complex64 +type MyFloat32 float32 +type MyFloat64 float64 +type MyInt int +type MyInt8 int8 +type MyInt16 int16 +type MyInt32 int32 +type MyInt64 int64 +type MyString string +type MyUint uint +type MyUint8 uint8 +type MyUint16 uint16 +type MyUint32 uint32 +type MyUint64 uint64 +type MyUintptr uintptr + +func panicCustomComplex64() { + panic(MyComplex64(0.11 + 3i)) +} + +func panicCustomComplex128() { + panic(MyComplex128(32.1 + 10i)) +} + +func panicCustomString() { + panic(MyString("Panic")) +} + +func panicCustomBool() { + panic(MyBool(true)) +} + +func panicCustomInt() { + panic(MyInt(93)) +} + +func panicCustomInt8() { + panic(MyInt8(93)) +} + +func panicCustomInt16() { + panic(MyInt16(93)) +} + +func panicCustomInt32() { + panic(MyInt32(93)) +} + +func panicCustomInt64() { + panic(MyInt64(93)) +} + +func panicCustomUint() { + panic(MyUint(93)) +} + +func panicCustomUint8() { + panic(MyUint8(93)) +} + +func panicCustomUint16() { + panic(MyUint16(93)) +} + +func panicCustomUint32() { + panic(MyUint32(93)) +} + +func panicCustomUint64() { + panic(MyUint64(93)) +} + +func panicCustomUintptr() { + panic(MyUintptr(93)) +} + +func panicCustomFloat64() { + panic(MyFloat64(-93.70)) +} + +func panicCustomFloat32() { + panic(MyFloat32(-93.70)) +} + +func init() { + register("panicCustomComplex64", panicCustomComplex64) + register("panicCustomComplex128", panicCustomComplex128) + register("panicCustomBool", panicCustomBool) + register("panicCustomFloat32", panicCustomFloat32) + register("panicCustomFloat64", 
panicCustomFloat64) + register("panicCustomInt", panicCustomInt) + register("panicCustomInt8", panicCustomInt8) + register("panicCustomInt16", panicCustomInt16) + register("panicCustomInt32", panicCustomInt32) + register("panicCustomInt64", panicCustomInt64) + register("panicCustomString", panicCustomString) + register("panicCustomUint", panicCustomUint) + register("panicCustomUint8", panicCustomUint8) + register("panicCustomUint16", panicCustomUint16) + register("panicCustomUint32", panicCustomUint32) + register("panicCustomUint64", panicCustomUint64) + register("panicCustomUintptr", panicCustomUintptr) +} From 5b15941c61f478b8ed08b76a27186527ba73d273 Mon Sep 17 00:00:00 2001 From: yuz Date: Sun, 1 Mar 2020 21:35:56 +0900 Subject: [PATCH 39/69] flag: changed flag variable name in package doc, for clarity Changed the flag variable name to nFlag instead of flagname, because flagname was confusing. Change-Id: I20dd4c4b4f605395d427a125ba4fd14580e5d766 Reviewed-on: https://go-review.googlesource.com/c/go/+/221678 Reviewed-by: Rob Pike --- src/flag/flag.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flag/flag.go b/src/flag/flag.go index abf20b6240..eb88c1faa8 100644 --- a/src/flag/flag.go +++ b/src/flag/flag.go @@ -9,9 +9,9 @@ Define flags using flag.String(), Bool(), Int(), etc. - This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + This declares an integer flag, -n, stored in the pointer nFlag, with type *int: import "flag" - var ip = flag.Int("flagname", 1234, "help message for flagname") + var nFlag = flag.Int("n", 1234, "help message for flag n") If you like, you can bind the flag to a variable using the Var() functions. 
var flagvar int func init() { From ed91661745790f673f4adf212d0ca499f0bd5ee4 Mon Sep 17 00:00:00 2001 From: Joel Sing Date: Mon, 2 Mar 2020 04:26:21 +1100 Subject: [PATCH 40/69] cmd/compile/internal/riscv64: correct ssa.BlockRetJmp The obj.Prog needs to be an obj.ARET rather than an obj.AJMP, otherwise the epilogue does not get correctly produced. Change-Id: Ie1262f2028d3b51720eeb0364a627fbde8b14df9 Reviewed-on: https://go-review.googlesource.com/c/go/+/221683 Reviewed-by: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/riscv64/ssa.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 167c9a3411..91f3164336 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -464,7 +464,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { case ssa.BlockRet: s.Prog(obj.ARET) case ssa.BlockRetJmp: - p := s.Prog(obj.AJMP) + p := s.Prog(obj.ARET) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = b.Aux.(*obj.LSym) From 34830beffa6396129bd9d9da58afb7a046775d49 Mon Sep 17 00:00:00 2001 From: Jingwei Date: Sat, 14 Dec 2019 15:52:17 +0800 Subject: [PATCH 41/69] doc/mem: remove unnecessary pre tags within same snippet currently the snippet is segmented but should be one code snippet. Change-Id: Ic747faf9bb1b52f9d1786eca70616a05b71ee801 Reviewed-on: https://go-review.googlesource.com/c/go/+/211198 Reviewed-by: Emmanuel Odeke --- doc/go_mem.html | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/go_mem.html b/doc/go_mem.html index d355bebaed..5f1eb68af3 100644 --- a/doc/go_mem.html +++ b/doc/go_mem.html @@ -273,9 +273,7 @@ func f() { a = "hello, world" <-c } - -

 func main() {
 	go f()
 	c <- 0

From 52b457d6f320c3491ab14a71afc3c7416b36430c Mon Sep 17 00:00:00 2001
From: Xiangdong Ji 
Date: Thu, 21 Nov 2019 07:46:37 +0000
Subject: [PATCH 42/69] runtime: use CBZ/CBNZ in linux/arm64 assembly code

Replace compare and branch on zero/non-zero instructions in linux/arm64
assembly files with CBZ/CBNZ.

Change-Id: I4dbf56678f85827e83b5863804368bc28a4603b5
Reviewed-on: https://go-review.googlesource.com/c/go/+/209617
Run-TryBot: Tobias Klauser 
TryBot-Result: Gobot Gobot 
Reviewed-by: Tobias Klauser 
---
 src/runtime/asm_arm64.s       | 36 ++++++++++++-----------------------
 src/runtime/race_arm64.s      |  3 +--
 src/runtime/rt0_linux_arm64.s |  3 +--
 src/runtime/sys_linux_arm64.s |  3 +--
 src/runtime/tls_arm64.s       |  6 ++----
 5 files changed, 17 insertions(+), 34 deletions(-)

diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index 0be06d124e..6b3d1e779e 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -27,8 +27,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
 
 	// if there is a _cgo_init, call it using the gcc ABI.
 	MOVD	_cgo_init(SB), R12
-	CMP	$0, R12
-	BEQ	nocgo
+	CBZ	R12, nocgo
 
 	MRS_TPIDR_R0			// load TLS base pointer
 	MOVD	R0, R3			// arg 3: TLS base pointer
@@ -114,8 +113,7 @@ TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
 	MOVD	ZR, gobuf_ret(R3)
 	// Assert ctxt is zero. See func save.
 	MOVD	gobuf_ctxt(R3), R0
-	CMP	$0, R0
-	BEQ	2(PC)
+	CBZ	R0, 2(PC)
 	CALL	runtime·badctxt(SB)
 	RET
 
@@ -448,8 +446,7 @@ CALLFN(·call1073741824, 1073741832 )
 // func memhash32(p unsafe.Pointer, h uintptr) uintptr
 TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
 	MOVB	runtime·useAeshash(SB), R0
-	CMP	$0, R0
-	BEQ	noaes
+	CBZ	R0, noaes
 	MOVD	p+0(FP), R0
 	MOVD	h+8(FP), R1
 	MOVD	$ret+16(FP), R2
@@ -474,8 +471,7 @@ noaes:
 // func memhash64(p unsafe.Pointer, h uintptr) uintptr
 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
 	MOVB	runtime·useAeshash(SB), R0
-	CMP	$0, R0
-	BEQ	noaes
+	CBZ	R0, noaes
 	MOVD	p+0(FP), R0
 	MOVD	h+8(FP), R1
 	MOVD	$ret+16(FP), R2
@@ -500,8 +496,7 @@ noaes:
 // func memhash(p unsafe.Pointer, h, size uintptr) uintptr
 TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
 	MOVB	runtime·useAeshash(SB), R0
-	CMP	$0, R0
-	BEQ	noaes
+	CBZ	R0, noaes
 	MOVD	p+0(FP), R0
 	MOVD	s+16(FP), R1
 	MOVD	h+8(FP), R3
@@ -513,8 +508,7 @@ noaes:
 // func strhash(p unsafe.Pointer, h uintptr) uintptr
 TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
 	MOVB	runtime·useAeshash(SB), R0
-	CMP	$0, R0
-	BEQ	noaes
+	CBZ	R0, noaes
 	MOVD	p+0(FP), R10 // string pointer
 	LDP	(R10), (R0, R1) //string data/ length
 	MOVD	h+8(FP), R3
@@ -548,8 +542,7 @@ TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
 	B	aes129plus
 
 aes0to15:
-	CMP	$0, R1
-	BEQ	aes0
+	CBZ	R1, aes0
 	VEOR	V2.B16, V2.B16, V2.B16
 	TBZ	$3, R1, less_than_8
 	VLD1.P	8(R0), V2.D[0]
@@ -879,8 +872,7 @@ TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
 	MOVD	$0, (g_sched+gobuf_ret)(g)
 	// Assert ctxt is zero. See func save.
 	MOVD	(g_sched+gobuf_ctxt)(g), R0
-	CMP	$0, R0
-	BEQ	2(PC)
+	CBZ	R0, 2(PC)
 	CALL	runtime·badctxt(SB)
 	RET
 
@@ -893,8 +885,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
 	MOVD	arg+8(FP), R0
 
 	MOVD	RSP, R2		// save original stack pointer
-	CMP	$0, g
-	BEQ	nosave
+	CBZ	g, nosave
 	MOVD	g, R4
 
 	// Figure out if we need to switch to m->g0 stack.
@@ -990,8 +981,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$24-32
 
 	// Load g from thread-local storage.
 	MOVB	runtime·iscgo(SB), R3
-	CMP	$0, R3
-	BEQ	nocgo
+	CBZ	R3, nocgo
 	BL	runtime·load_g(SB)
 nocgo:
 
@@ -1000,8 +990,7 @@ nocgo:
 	// In this case, we're running on the thread stack, so there's
 	// lots of space, but the linker doesn't know. Hide the call from
 	// the linker analysis by using an indirect call.
-	CMP	$0, g
-	BEQ	needm
+	CBZ	g, needm
 
 	MOVD	g_m(g), R8
 	MOVD	R8, savedm-8(SP)
@@ -1092,8 +1081,7 @@ havem:
 	// If the m on entry was nil, we called needm above to borrow an m
 	// for the duration of the call. Since the call is over, return it with dropm.
 	MOVD	savedm-8(SP), R6
-	CMP	$0, R6
-	BNE	droppedm
+	CBNZ	R6, droppedm
 	MOVD	$runtime·dropm(SB), R0
 	BL	(R0)
 droppedm:
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index 46224f8d73..9b909ac021 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -421,8 +421,7 @@ TEXT	runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
 	// First, code below assumes that we are on curg, while raceGetProcCmd
 	// can be executed on g0. Second, it is called frequently, so will
 	// benefit from this fast path.
-	CMP	$0, R0
-	BNE	rest
+	CBNZ	R0, rest
 	MOVD	g, R13
 	load_g
 	MOVD	g_m(g), R0
diff --git a/src/runtime/rt0_linux_arm64.s b/src/runtime/rt0_linux_arm64.s
index a6bc99df56..f48a8d6190 100644
--- a/src/runtime/rt0_linux_arm64.s
+++ b/src/runtime/rt0_linux_arm64.s
@@ -44,8 +44,7 @@ TEXT _rt0_arm64_linux_lib(SB),NOSPLIT,$184
 
 	// Create a new thread to do the runtime initialization and return.
 	MOVD	_cgo_sys_thread_create(SB), R4
-	CMP	$0, R4
-	BEQ	nocgo
+	CBZ	R4, nocgo
 	MOVD	$_rt0_arm64_linux_lib_go(SB), R0
 	MOVD	$0, R1
 	SUB	$16, RSP		// reserve 16 bytes for sp-8 where fp may be saved.
diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s
index b9588cec30..b23e3b9a11 100644
--- a/src/runtime/sys_linux_arm64.s
+++ b/src/runtime/sys_linux_arm64.s
@@ -419,8 +419,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192
 	// first save R0, because runtime·load_g will clobber it
 	MOVW	R0, 8(RSP)
 	MOVBU	runtime·iscgo(SB), R0
-	CMP	$0, R0
-	BEQ	2(PC)
+	CBZ	R0, 2(PC)
 	BL	runtime·load_g(SB)
 
 	MOVD	R1, 16(RSP)
diff --git a/src/runtime/tls_arm64.s b/src/runtime/tls_arm64.s
index fb8627db29..999914d655 100644
--- a/src/runtime/tls_arm64.s
+++ b/src/runtime/tls_arm64.s
@@ -10,8 +10,7 @@
 
 TEXT runtime·load_g(SB),NOSPLIT,$0
 	MOVB	runtime·iscgo(SB), R0
-	CMP	$0, R0
-	BEQ	nocgo
+	CBZ	R0, nocgo
 
 	MRS_TPIDR_R0
 #ifdef GOOS_darwin
@@ -27,8 +26,7 @@ nocgo:
 
 TEXT runtime·save_g(SB),NOSPLIT,$0
 	MOVB	runtime·iscgo(SB), R0
-	CMP	$0, R0
-	BEQ	nocgo
+	CBZ	R0, nocgo
 
 	MRS_TPIDR_R0
 #ifdef GOOS_darwin

From bd6f4cd886458bb4ab1f492202fa2b499dcfc6f7 Mon Sep 17 00:00:00 2001
From: Joel Sing 
Date: Mon, 2 Mar 2020 04:23:12 +1100
Subject: [PATCH 43/69] cmd/compile: improve subtraction of constants on
 riscv64

Convert subtraction of a constant into an ADDI with a negative immediate,
where possible.

Change-Id: Ie8d54b7538f0012e5f898abea233b2957fe31899
Reviewed-on: https://go-review.googlesource.com/c/go/+/221679
Reviewed-by: Cherry Zhang 
---
 .../compile/internal/ssa/gen/RISCV64.rules    | 14 +++-
 .../compile/internal/ssa/rewriteRISCV64.go    | 75 +++++++++++++++++++
 2 files changed, 86 insertions(+), 3 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 4ab4656bd5..3fd482b50c 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -438,9 +438,6 @@
 (MOVDconst  [c]) && !is32Bit(c) && int32(c) <  0 -> (ADD (SLLI  [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
 (MOVDconst  [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI  [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
 
-// Fold ADD+MOVDconst into ADDI where possible.
-(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
-
 (Addr ...) -> (MOVaddr ...)
 (LocalAddr {sym} base _) -> (MOVaddr {sym} base)
 
@@ -459,5 +456,16 @@
 (ClosureCall ...) -> (CALLclosure ...)
 (InterCall   ...) -> (CALLinter   ...)
 
+// Optimizations
+
+// Fold ADD+MOVDconst into ADDI where possible.
+(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
+
+// Convert subtraction of a const into ADDI with negative immediate, where possible.
+(SUB x (MOVBconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVHconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+
 // remove redundant *const ops
 (ADDI [0]  x) -> x
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 2e0b34de8d..128f7bb2b2 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -404,6 +404,8 @@ func rewriteValueRISCV64(v *Value) bool {
 		return rewriteValueRISCV64_OpRISCV64MOVWload(v)
 	case OpRISCV64MOVWstore:
 		return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
+	case OpRISCV64SUB:
+		return rewriteValueRISCV64_OpRISCV64SUB(v)
 	case OpRotateLeft16:
 		return rewriteValueRISCV64_OpRotateLeft16(v)
 	case OpRotateLeft32:
@@ -2877,6 +2879,79 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (SUB x (MOVBconst [val]))
+	// cond: is32Bit(-val)
+	// result: (ADDI [-val] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVBconst {
+			break
+		}
+		val := v_1.AuxInt
+		if !(is32Bit(-val)) {
+			break
+		}
+		v.reset(OpRISCV64ADDI)
+		v.AuxInt = -val
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUB x (MOVHconst [val]))
+	// cond: is32Bit(-val)
+	// result: (ADDI [-val] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVHconst {
+			break
+		}
+		val := v_1.AuxInt
+		if !(is32Bit(-val)) {
+			break
+		}
+		v.reset(OpRISCV64ADDI)
+		v.AuxInt = -val
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUB x (MOVWconst [val]))
+	// cond: is32Bit(-val)
+	// result: (ADDI [-val] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVWconst {
+			break
+		}
+		val := v_1.AuxInt
+		if !(is32Bit(-val)) {
+			break
+		}
+		v.reset(OpRISCV64ADDI)
+		v.AuxInt = -val
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUB x (MOVDconst [val]))
+	// cond: is32Bit(-val)
+	// result: (ADDI [-val] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVDconst {
+			break
+		}
+		val := v_1.AuxInt
+		if !(is32Bit(-val)) {
+			break
+		}
+		v.reset(OpRISCV64ADDI)
+		v.AuxInt = -val
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]

From d28b8524a4d5d86d7b1e2df790abff6244de2a00 Mon Sep 17 00:00:00 2001
From: Joel Sing 
Date: Mon, 2 Mar 2020 04:24:35 +1100
Subject: [PATCH 44/69] cmd/compile: optimize subtraction of zero on riscv64

Change-Id: I9a994b01e9fecb13077c30df4b7677d40d179cce
Reviewed-on: https://go-review.googlesource.com/c/go/+/221681
Reviewed-by: Cherry Zhang 
---
 .../compile/internal/ssa/gen/RISCV64.rules    |  9 +++
 .../compile/internal/ssa/rewriteRISCV64.go    | 59 +++++++++++++++++++
 2 files changed, 68 insertions(+)

diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 3fd482b50c..9c1169dc67 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -467,5 +467,14 @@
 (SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
 (SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
 
+// Subtraction of zero.
+(SUB x (MOVBconst [0])) -> x
+(SUB x (MOVHconst [0])) -> x
+(SUB x (MOVWconst [0])) -> x
+(SUB x (MOVDconst [0])) -> x
+
+// Subtraction of zero with sign extension.
+(SUBW x (MOVWconst [0])) -> (ADDIW [0] x)
+
 // remove redundant *const ops
 (ADDI [0]  x) -> x
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 128f7bb2b2..b363b10ad7 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -406,6 +406,8 @@ func rewriteValueRISCV64(v *Value) bool {
 		return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
 	case OpRISCV64SUB:
 		return rewriteValueRISCV64_OpRISCV64SUB(v)
+	case OpRISCV64SUBW:
+		return rewriteValueRISCV64_OpRISCV64SUBW(v)
 	case OpRotateLeft16:
 		return rewriteValueRISCV64_OpRotateLeft16(v)
 	case OpRotateLeft32:
@@ -2950,6 +2952,63 @@ func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
 		v.AddArg(x)
 		return true
 	}
+	// match: (SUB x (MOVBconst [0]))
+	// result: x
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (SUB x (MOVHconst [0]))
+	// result: x
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (SUB x (MOVWconst [0]))
+	// result: x
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (SUB x (MOVDconst [0]))
+	// result: x
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	return false
+}
+func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (SUBW x (MOVWconst [0]))
+	// result: (ADDIW [0] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 {
+			break
+		}
+		v.reset(OpRISCV64ADDIW)
+		v.AuxInt = 0
+		v.AddArg(x)
+		return true
+	}
 	return false
 }
 func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool {

From c9ece81cc8c1a81ebdebcf6dfc13ebf5c4cbdb61 Mon Sep 17 00:00:00 2001
From: Joel Sing 
Date: Mon, 2 Mar 2020 04:25:54 +1100
Subject: [PATCH 45/69] cmd/compile: absorb SNEZ into branch on riscv64

Change-Id: I55fd93843a7fb574a7dd66ebb87fdd96e944d555
Reviewed-on: https://go-review.googlesource.com/c/go/+/221682
Reviewed-by: Cherry Zhang 
---
 src/cmd/compile/internal/ssa/gen/RISCV64.rules | 3 +++
 src/cmd/compile/internal/ssa/rewriteRISCV64.go | 9 +++++++++
 2 files changed, 12 insertions(+)

diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 9c1169dc67..a19f8aa55b 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -458,6 +458,9 @@
 
 // Optimizations
 
+// Absorb SNEZ into branch.
+(BNE (SNEZ x) yes no) -> (BNE x yes no)
+
 // Fold ADD+MOVDconst into ADDI where possible.
 (ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
 
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index b363b10ad7..6b3f4f70b5 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -4387,6 +4387,15 @@ func rewriteValueRISCV64_OpZeroExt8to64(v *Value) bool {
 }
 func rewriteBlockRISCV64(b *Block) bool {
 	switch b.Kind {
+	case BlockRISCV64BNE:
+		// match: (BNE (SNEZ x) yes no)
+		// result: (BNE x yes no)
+		for b.Controls[0].Op == OpRISCV64SNEZ {
+			v_0 := b.Controls[0]
+			x := v_0.Args[0]
+			b.resetWithControl(BlockRISCV64BNE, x)
+			return true
+		}
 	case BlockIf:
 		// match: (If cond yes no)
 		// result: (BNE cond yes no)

From e37cc298636abcd500aa8acc7375d001c431c64e Mon Sep 17 00:00:00 2001
From: Michael Munday 
Date: Mon, 20 May 2019 11:55:56 -0700
Subject: [PATCH 46/69] cmd/compile: optimize integer-in-range checks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This CL incorporates code from CL 201206 by Josh Bleecher Snyder
(thanks Josh).

This CL restores the integer-in-range optimizations in the SSA
backend. The fuse pass is enhanced to detect inequalities that
could be merged and fuse their associated blocks while the generic
rules optimize them into a single unsigned comparison.

For example, the inequality `x >= 0 && x < 10` will now be optimized
to `unsigned(x) < 10`.

Overall this has a fairly positive impact on binary sizes.

name                      old time/op       new time/op       delta
Template                        192ms ± 1%        192ms ± 1%    ~     (p=0.757 n=17+18)
Unicode                        76.6ms ± 2%       76.5ms ± 2%    ~     (p=0.603 n=19+19)
GoTypes                         694ms ± 1%        693ms ± 1%    ~     (p=0.569 n=19+20)
Compiler                        3.26s ± 0%        3.27s ± 0%  +0.25%  (p=0.000 n=20+20)
SSA                             7.41s ± 0%        7.49s ± 0%  +1.10%  (p=0.000 n=17+19)
Flate                           120ms ± 1%        120ms ± 1%  +0.38%  (p=0.003 n=19+19)
GoParser                        152ms ± 1%        152ms ± 1%    ~     (p=0.061 n=17+19)
Reflect                         422ms ± 1%        425ms ± 2%  +0.76%  (p=0.001 n=18+20)
Tar                             167ms ± 1%        167ms ± 0%    ~     (p=0.730 n=18+19)
XML                             233ms ± 4%        231ms ± 1%    ~     (p=0.752 n=20+17)
LinkCompiler                    927ms ± 8%        928ms ± 8%    ~     (p=0.857 n=19+20)
ExternalLinkCompiler            1.81s ± 2%        1.81s ± 2%    ~     (p=0.513 n=19+20)
LinkWithoutDebugCompiler        556ms ±10%        583ms ±13%  +4.95%  (p=0.007 n=20+20)
[Geo mean]                      478ms             481ms       +0.52%

name                      old user-time/op  new user-time/op  delta
Template                        270ms ± 5%        269ms ± 7%    ~     (p=0.925 n=20+20)
Unicode                         134ms ± 7%        131ms ±14%    ~     (p=0.593 n=18+20)
GoTypes                         981ms ± 3%        987ms ± 2%  +0.63%  (p=0.049 n=19+18)
Compiler                        4.50s ± 2%        4.50s ± 1%    ~     (p=0.588 n=19+20)
SSA                             10.6s ± 2%        10.6s ± 1%    ~     (p=0.141 n=20+19)
Flate                           164ms ± 8%        165ms ±10%    ~     (p=0.738 n=20+20)
GoParser                        202ms ± 5%        203ms ± 6%    ~     (p=0.820 n=20+20)
Reflect                         587ms ± 6%        597ms ± 3%    ~     (p=0.087 n=20+18)
Tar                             230ms ± 6%        228ms ± 8%    ~     (p=0.569 n=19+20)
XML                             311ms ± 6%        314ms ± 5%    ~     (p=0.369 n=20+20)
LinkCompiler                    878ms ± 8%        887ms ± 7%    ~     (p=0.289 n=20+20)
ExternalLinkCompiler            1.60s ± 7%        1.60s ± 7%    ~     (p=0.820 n=20+20)
LinkWithoutDebugCompiler        498ms ±12%        489ms ±11%    ~     (p=0.398 n=20+20)
[Geo mean]                      611ms             611ms       +0.05%

name                      old alloc/op      new alloc/op      delta
Template                       36.1MB ± 0%       36.0MB ± 0%  -0.32%  (p=0.000 n=20+20)
Unicode                        28.3MB ± 0%       28.3MB ± 0%  -0.03%  (p=0.000 n=19+20)
GoTypes                         121MB ± 0%        121MB ± 0%    ~     (p=0.226 n=16+20)
Compiler                        563MB ± 0%        563MB ± 0%    ~     (p=0.166 n=20+19)
SSA                            1.32GB ± 0%       1.33GB ± 0%  +0.88%  (p=0.000 n=20+19)
Flate                          22.7MB ± 0%       22.7MB ± 0%  -0.02%  (p=0.033 n=19+20)
GoParser                       27.9MB ± 0%       27.9MB ± 0%  -0.02%  (p=0.001 n=20+20)
Reflect                        78.3MB ± 0%       78.2MB ± 0%  -0.01%  (p=0.019 n=20+20)
Tar                            34.0MB ± 0%       34.0MB ± 0%  -0.04%  (p=0.000 n=20+20)
XML                            43.9MB ± 0%       43.9MB ± 0%  -0.07%  (p=0.000 n=20+19)
LinkCompiler                    205MB ± 0%        205MB ± 0%  +0.44%  (p=0.000 n=20+18)
ExternalLinkCompiler            223MB ± 0%        223MB ± 0%  +0.03%  (p=0.000 n=20+20)
LinkWithoutDebugCompiler        139MB ± 0%        142MB ± 0%  +1.75%  (p=0.000 n=20+20)
[Geo mean]                     93.7MB            93.9MB       +0.20%

name                      old allocs/op     new allocs/op     delta
Template                         363k ± 0%         361k ± 0%  -0.58%  (p=0.000 n=20+19)
Unicode                          329k ± 0%         329k ± 0%  -0.06%  (p=0.000 n=19+20)
GoTypes                         1.28M ± 0%        1.28M ± 0%  -0.01%  (p=0.000 n=20+20)
Compiler                        5.40M ± 0%        5.40M ± 0%  -0.01%  (p=0.000 n=20+20)
SSA                             12.7M ± 0%        12.8M ± 0%  +0.80%  (p=0.000 n=20+20)
Flate                            228k ± 0%         228k ± 0%    ~     (p=0.194 n=20+20)
GoParser                         295k ± 0%         295k ± 0%  -0.04%  (p=0.000 n=20+20)
Reflect                          949k ± 0%         949k ± 0%  -0.01%  (p=0.000 n=20+20)
Tar                              337k ± 0%         337k ± 0%  -0.06%  (p=0.000 n=20+20)
XML                              418k ± 0%         417k ± 0%  -0.17%  (p=0.000 n=20+20)
LinkCompiler                     553k ± 0%         554k ± 0%  +0.22%  (p=0.000 n=20+19)
ExternalLinkCompiler            1.52M ± 0%        1.52M ± 0%  +0.27%  (p=0.000 n=20+20)
LinkWithoutDebugCompiler         186k ± 0%         186k ± 0%  +0.06%  (p=0.000 n=20+20)
[Geo mean]                       723k              723k       +0.03%

name                      old text-bytes    new text-bytes    delta
HelloSize                       828kB ± 0%        828kB ± 0%  -0.01%  (p=0.000 n=20+20)

name                      old data-bytes    new data-bytes    delta
HelloSize                      13.4kB ± 0%       13.4kB ± 0%    ~     (all equal)

name                      old bss-bytes     new bss-bytes     delta
HelloSize                       180kB ± 0%        180kB ± 0%    ~     (all equal)

name                      old exe-bytes     new exe-bytes     delta
HelloSize                      1.23MB ± 0%       1.23MB ± 0%  -0.33%  (p=0.000 n=20+20)

file      before    after     Δ       %
addr2line 4320075   4311883   -8192   -0.190%
asm       5191932   5187836   -4096   -0.079%
buildid   2835338   2831242   -4096   -0.144%
compile   20531717  20569099  +37382  +0.182%
cover     5322511   5318415   -4096   -0.077%
dist      3723749   3719653   -4096   -0.110%
doc       4743515   4739419   -4096   -0.086%
fix       3413960   3409864   -4096   -0.120%
link      6690119   6686023   -4096   -0.061%
nm        4269616   4265520   -4096   -0.096%
pprof     14942189  14929901  -12288  -0.082%
trace     11807164  11790780  -16384  -0.139%
vet       8384104   8388200   +4096   +0.049%
go        15339076  15334980  -4096   -0.027%
total     132258257 132226007 -32250  -0.024%

Fixes #30645.

Change-Id: If551ac5996097f3685870d083151b5843170aab0
Reviewed-on: https://go-review.googlesource.com/c/go/+/165998
Run-TryBot: Michael Munday 
TryBot-Result: Gobot Gobot 
Reviewed-by: Keith Randall 
---
 src/cmd/compile/internal/ssa/branchelim.go    |   16 +-
 src/cmd/compile/internal/ssa/compile.go       |    6 +-
 src/cmd/compile/internal/ssa/fuse.go          |   13 +-
 .../compile/internal/ssa/fuse_comparisons.go  |  157 +
 src/cmd/compile/internal/ssa/fuse_test.go     |   12 +-
 .../compile/internal/ssa/gen/generic.rules    |   48 +
 src/cmd/compile/internal/ssa/nilcheck_test.go |   18 +-
 .../compile/internal/ssa/rewritegeneric.go    | 2640 +++++++++++++++++
 test/codegen/fuse.go                          |  197 ++
 9 files changed, 3080 insertions(+), 27 deletions(-)
 create mode 100644 src/cmd/compile/internal/ssa/fuse_comparisons.go
 create mode 100644 test/codegen/fuse.go

diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index c7c3f8c15f..4f9fd8e22e 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -148,7 +148,7 @@ func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
 	// the number of useless instructions executed.
 	const maxfuseinsts = 2
 
-	if len(simple.Values) > maxfuseinsts || !allTrivial(simple) {
+	if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
 		return false
 	}
 
@@ -305,10 +305,10 @@ func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
 		return false
 	}
 	yes, no := b.Succs[0].Block(), b.Succs[1].Block()
-	if !isLeafPlain(yes) || len(yes.Values) > 1 || !allTrivial(yes) {
+	if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
 		return false
 	}
-	if !isLeafPlain(no) || len(no.Values) > 1 || !allTrivial(no) {
+	if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
 		return false
 	}
 	if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
@@ -415,7 +415,15 @@ func shouldElimIfElse(no, yes, post *Block, arch string) bool {
 	}
 }
 
-func allTrivial(b *Block) bool {
+// canSpeculativelyExecute reports whether every value in the block can
+// be evaluated without causing any observable side effects (memory
+// accesses, panics and so on) except for execution time changes. It
+// also ensures that the block does not contain any phis which we can't
+// speculatively execute.
+// Warning: this function cannot currently detect values that represent
+// instructions the execution of which need to be guarded with CPU
+// hardware feature checks. See issue #34950.
+func canSpeculativelyExecute(b *Block) bool {
 	// don't fuse memory ops, Phi ops, divides (can panic),
 	// or anything else with side-effects
 	for _, v := range b.Values {
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 448b1cf814..2de4e133bf 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -428,7 +428,7 @@ var passes = [...]pass{
 	{name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
 	{name: "nilcheckelim", fn: nilcheckelim},
 	{name: "prove", fn: prove},
-	{name: "fuse plain", fn: fusePlain},
+	{name: "early fuse", fn: fuseEarly},
 	{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
 	{name: "softfloat", fn: softfloat, required: true},
 	{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
@@ -436,7 +436,7 @@ var passes = [...]pass{
 	{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
 	{name: "check bce", fn: checkbce},
 	{name: "branchelim", fn: branchelim},
-	{name: "fuse", fn: fuseAll},
+	{name: "late fuse", fn: fuseLate},
 	{name: "dse", fn: dse},
 	{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
 	{name: "insert resched checks", fn: insertLoopReschedChecks,
@@ -491,7 +491,7 @@ var passOrder = [...]constraint{
 	// allow deadcode to clean up after nilcheckelim
 	{"nilcheckelim", "generic deadcode"},
 	// nilcheckelim generates sequences of plain basic blocks
-	{"nilcheckelim", "fuse"},
+	{"nilcheckelim", "late fuse"},
 	// nilcheckelim relies on opt to rewrite user nil checks
 	{"opt", "nilcheckelim"},
 	// tighten will be most effective when as many values have been removed as possible
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index c2d4051da8..f80ec0dc5d 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -8,18 +8,18 @@ import (
 	"cmd/internal/src"
 )
 
-// fusePlain runs fuse(f, fuseTypePlain).
-func fusePlain(f *Func) { fuse(f, fuseTypePlain) }
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
 
-// fuseAll runs fuse(f, fuseTypeAll).
-func fuseAll(f *Func) { fuse(f, fuseTypeAll) }
+// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf).
+func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf) }
 
 type fuseType uint8
 
 const (
 	fuseTypePlain fuseType = 1 << iota
 	fuseTypeIf
-	fuseTypeAll = fuseTypePlain | fuseTypeIf
+	fuseTypeIntInRange
 )
 
 // fuse simplifies control flow by joining basic blocks.
@@ -32,6 +32,9 @@ func fuse(f *Func, typ fuseType) {
 			if typ&fuseTypeIf != 0 {
 				changed = fuseBlockIf(b) || changed
 			}
+			if typ&fuseTypeIntInRange != 0 {
+				changed = fuseIntegerComparisons(b) || changed
+			}
 			if typ&fuseTypePlain != 0 {
 				changed = fuseBlockPlain(b) || changed
 			}
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
new file mode 100644
index 0000000000..d843fc3fda
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
+// which can be optimized to 'unsigned(x-1) < 4'.
+//
+// Look for branch structure like:
+//
+//   p
+//   |\
+//   | b
+//   |/ \
+//   s0 s1
+//
+// In our example, p has control '1 <= x', b has control 'x < 5',
+// and s0 and s1 are the if and else results of the comparison.
+//
+// This will be optimized into:
+//
+//   p
+//    \
+//     b
+//    / \
+//   s0 s1
+//
+// where b has the combined control value 'unsigned(x-1) < 4'.
+// Later passes will then fuse p and b.
+func fuseIntegerComparisons(b *Block) bool {
+	if len(b.Preds) != 1 {
+		return false
+	}
+	p := b.Preds[0].Block()
+	if b.Kind != BlockIf || p.Kind != BlockIf {
+		return false
+	}
+
+	// Don't merge control values if b is likely to be bypassed anyway.
+	if p.Likely == BranchLikely && p.Succs[0].Block() != b {
+		return false
+	}
+	if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
+		return false
+	}
+
+	// Check if the control values combine to make an integer inequality that
+	// can be further optimized later.
+	bc := b.Controls[0]
+	pc := p.Controls[0]
+	if !areMergeableInequalities(bc, pc) {
+		return false
+	}
+
+	// If the first (true) successors match then we have a disjunction (||).
+	// If the second (false) successors match then we have a conjunction (&&).
+	for i, op := range [2]Op{OpOrB, OpAndB} {
+		if p.Succs[i].Block() != b.Succs[i].Block() {
+			continue
+		}
+
+		// TODO(mundaym): should we also check the cost of executing b?
+		// Currently we might speculatively execute b even if b contains
+		// a lot of instructions. We could just check that len(b.Values)
+		// is lower than a fixed amount. Bear in mind however that the
+		// other optimization passes might yet reduce the cost of b
+		// significantly so we shouldn't be overly conservative.
+		if !canSpeculativelyExecute(b) {
+			return false
+		}
+
+		// Logically combine the control values for p and b.
+		v := b.NewValue0(bc.Pos, op, bc.Type)
+		v.AddArg(pc)
+		v.AddArg(bc)
+
+		// Set the combined control value as the control value for b.
+		b.SetControl(v)
+
+		// Modify p so that it jumps directly to b.
+		p.removeEdge(i)
+		p.Kind = BlockPlain
+		p.Likely = BranchUnknown
+		p.ResetControls()
+
+		return true
+	}
+
+	// TODO: could negate condition(s) to merge controls.
+	return false
+}
+
+// getConstIntArgIndex returns the index of the first argument that is a
+// constant integer or -1 if no such argument exists.
+func getConstIntArgIndex(v *Value) int {
+	for i, a := range v.Args {
+		switch a.Op {
+		case OpConst8, OpConst16, OpConst32, OpConst64:
+			return i
+		}
+	}
+	return -1
+}
+
+// isSignedInequality reports whether v represents the inequality < or ≤
+// in the signed domain.
+func isSignedInequality(v *Value) bool {
+	switch v.Op {
+	case OpLess64, OpLess32, OpLess16, OpLess8,
+		OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+		return true
+	}
+	return false
+}
+
+// isUnsignedInequality reports whether v represents the inequality < or ≤
+// in the unsigned domain.
+func isUnsignedInequality(v *Value) bool {
+	switch v.Op {
+	case OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+		OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U:
+		return true
+	}
+	return false
+}
+
+func areMergeableInequalities(x, y *Value) bool {
+	// We need both inequalities to be either in the signed or unsigned domain.
+	// TODO(mundaym): it would also be good to merge when we have an Eq op that
+	// could be transformed into a Less/Leq. For example in the unsigned
+	// domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
+	inequalityChecks := [...]func(*Value) bool{
+		isSignedInequality,
+		isUnsignedInequality,
+	}
+	for _, f := range inequalityChecks {
+		if !f(x) || !f(y) {
+			continue
+		}
+
+		// Check that both inequalities are comparisons with constants.
+		xi := getConstIntArgIndex(x)
+		if xi < 0 {
+			return false
+		}
+		yi := getConstIntArgIndex(y)
+		if yi < 0 {
+			return false
+		}
+
+		// Check that the non-constant arguments to the inequalities
+		// are the same.
+		return x.Args[xi^1] == y.Args[yi^1]
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index 77d2aad5c1..5fe3da93ca 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -26,7 +26,7 @@ func TestFuseEliminatesOneBranch(t *testing.T) {
 			Exit("mem")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -56,7 +56,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
 			Exit("mem")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -90,7 +90,7 @@ func TestFuseHandlesPhis(t *testing.T) {
 			Exit("mem")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -122,7 +122,7 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
 		))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for k, b := range fun.blocks {
 		if k[:1] == "z" && b.Kind != BlockInvalid {
@@ -153,7 +153,7 @@ func TestFuseSideEffects(t *testing.T) {
 			Goto("loop")))
 
 	CheckFunc(fun.f)
-	fuseAll(fun.f)
+	fuseLate(fun.f)
 
 	for _, b := range fun.f.Blocks {
 		if b == fun.blocks["then"] && b.Kind == BlockInvalid {
@@ -196,7 +196,7 @@ func BenchmarkFuse(b *testing.B) {
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
 				fun := c.Fun("entry", blocks...)
-				fuseAll(fun.f)
+				fuseLate(fun.f)
 			}
 		})
 	}
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 54c5ed646f..bc16f5a7af 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -254,6 +254,54 @@
 (Neq16 (Const16  [c]) (Add16 (Const16  [d]) x)) -> (Neq16 (Const16  [int64(int16(c-d))]) x)
 (Neq8  (Const8   [c]) (Add8  (Const8   [d]) x)) -> (Neq8 (Const8  [int64(int8(c-d))]) x)
 
+// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
+(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c -> ((Less|Leq)64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c -> ((Less|Leq)32U (Sub32  x (Const32  [c])) (Const32  [d-c]))
+(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c -> ((Less|Leq)16U (Sub16  x (Const16  [c])) (Const16  [d-c]))
+(AndB (Leq8  (Const8  [c]) x) ((Less|Leq)8  x (Const8  [d]))) && d >= c -> ((Less|Leq)8U  (Sub8   x (Const8   [c])) (Const8   [d-c]))
+
+// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
+(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && int64(c+1) > int64(c) -> ((Less|Leq)64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && int32(c+1) > int32(c) -> ((Less|Leq)32U (Sub32  x (Const32  [c+1])) (Const32  [d-c-1]))
+(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && int16(c+1) > int16(c) -> ((Less|Leq)16U (Sub16  x (Const16  [c+1])) (Const16  [d-c-1]))
+(AndB (Less8  (Const8  [c]) x) ((Less|Leq)8  x (Const8  [d]))) && d >= c+1 && int8(c+1)  > int8(c)  -> ((Less|Leq)8U  (Sub8   x (Const8   [c+1])) (Const8   [d-c-1]))
+
+// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
+(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) -> ((Less|Leq)64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) -> ((Less|Leq)32U (Sub32  x (Const32  [c])) (Const32  [int64(int32(d-c))]))
+(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) -> ((Less|Leq)16U (Sub16  x (Const16  [c])) (Const16  [int64(int16(d-c))]))
+(AndB (Leq8U  (Const8  [c]) x) ((Less|Leq)8U  x (Const8  [d]))) && uint8(d)  >= uint8(c)  -> ((Less|Leq)8U  (Sub8   x (Const8   [c])) (Const8   [int64(int8(d-c))]))
+
+// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
+(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) -> ((Less|Leq)64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) -> ((Less|Leq)32U (Sub32  x (Const32  [int64(int32(c+1))])) (Const32  [int64(int32(d-c-1))]))
+(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) -> ((Less|Leq)16U (Sub16  x (Const16  [int64(int16(c+1))])) (Const16  [int64(int16(d-c-1))]))
+(AndB (Less8U  (Const8  [c]) x) ((Less|Leq)8U  x (Const8  [d]))) && uint8(d)  >= uint8(c+1)  && uint8(c+1)  > uint8(c)  -> ((Less|Leq)8U  (Sub8   x (Const8   [int64(int8(c+1))]))  (Const8   [int64(int8(d-c-1))]))
+
+// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d -> ((Less|Leq)64U (Const64  [c-d]) (Sub64  x (Const64  [d])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d -> ((Less|Leq)32U (Const32  [c-d]) (Sub32  x (Const32  [d])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d -> ((Less|Leq)16U (Const16  [c-d]) (Sub16  x (Const16  [d])))
+(OrB ((Less|Leq)8  (Const8  [c]) x) (Less8  x (Const8  [d]))) && c >= d -> ((Less|Leq)8U  (Const8   [c-d]) (Sub8   x (Const8   [d])))
+
+// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && int64(d+1) > int64(d) -> ((Less|Leq)64U (Const64  [c-d-1]) (Sub64  x (Const64  [d+1])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && int32(d+1) > int32(d) -> ((Less|Leq)32U (Const32  [c-d-1]) (Sub32  x (Const32  [d+1])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && int16(d+1) > int16(d) -> ((Less|Leq)16U (Const16  [c-d-1]) (Sub16  x (Const16  [d+1])))
+(OrB ((Less|Leq)8  (Const8  [c]) x) (Leq8  x (Const8  [d]))) && c >= d+1 && int8(d+1)  > int8(d)  -> ((Less|Leq)8U  (Const8   [c-d-1]) (Sub8   x (Const8   [d+1])))
+
+// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) -> ((Less|Leq)64U (Const64                [c-d]) (Sub64  x (Const64  [d])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) -> ((Less|Leq)32U (Const32  [int64(int32(c-d))]) (Sub32  x (Const32  [d])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) -> ((Less|Leq)16U (Const16  [int64(int16(c-d))]) (Sub16  x (Const16  [d])))
+(OrB ((Less|Leq)8U  (Const8  [c]) x) (Less8U  x (Const8  [d]))) && uint8(c)  >= uint8(d)  -> ((Less|Leq)8U  (Const8   [int64( int8(c-d))]) (Sub8   x (Const8   [d])))
+
+// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) -> ((Less|Leq)64U (Const64                [c-d-1]) (Sub64  x (Const64  [d+1])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) -> ((Less|Leq)32U (Const32  [int64(int32(c-d-1))]) (Sub32  x (Const32  [int64(int32(d+1))])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) -> ((Less|Leq)16U (Const16  [int64(int16(c-d-1))]) (Sub16  x (Const16  [int64(int16(d+1))])))
+(OrB ((Less|Leq)8U  (Const8  [c]) x) (Leq8U  x (Const8  [d]))) && uint8(c)  >= uint8(d+1)  && uint8(d+1)  > uint8(d)  -> ((Less|Leq)8U  (Const8   [int64( int8(c-d-1))]) (Sub8   x (Const8   [int64( int8(d+1))])))
+
 // Canonicalize x-const to x+(-const)
 (Sub64 x (Const64  [c])) && x.Op != OpConst64 -> (Add64 (Const64  [-c]) x)
 (Sub32 x (Const32  [c])) && x.Op != OpConst32 -> (Add32 (Const32  [int64(int32(-c))]) x)
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index f728e8ee25..16d94614d8 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -374,7 +374,7 @@ func TestNilcheckUser(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
@@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) {
 	nilcheckelim(fun.f)
 
 	// clean up the removed nil check
-	fusePlain(fun.f)
+	fuse(fun.f, fuseTypePlain)
 	deadcode(fun.f)
 
 	CheckFunc(fun.f)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 0a4879a8ad..9e743838ab 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -30,6 +30,8 @@ func rewriteValuegeneric(v *Value) bool {
 		return rewriteValuegeneric_OpAnd64(v)
 	case OpAnd8:
 		return rewriteValuegeneric_OpAnd8(v)
+	case OpAndB:
+		return rewriteValuegeneric_OpAndB(v)
 	case OpArraySelect:
 		return rewriteValuegeneric_OpArraySelect(v)
 	case OpCom16:
@@ -278,6 +280,8 @@ func rewriteValuegeneric(v *Value) bool {
 		return rewriteValuegeneric_OpOr64(v)
 	case OpOr8:
 		return rewriteValuegeneric_OpOr8(v)
+	case OpOrB:
+		return rewriteValuegeneric_OpOrB(v)
 	case OpPhi:
 		return rewriteValuegeneric_OpPhi(v)
 	case OpPtrIndex:
@@ -2328,6 +2332,1324 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool {
 	}
 	return false
 }
+func rewriteValuegeneric_OpAndB(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (AndB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+	// cond: d >= c
+	// result: (Less64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+	// cond: d >= c
+	// result: (Leq64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+	// cond: d >= c
+	// result: (Less32U (Sub32  x (Const32  [c])) (Const32  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+	// cond: d >= c
+	// result: (Leq32U (Sub32  x (Const32  [c])) (Const32  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+	// cond: d >= c
+	// result: (Less16U (Sub16  x (Const16  [c])) (Const16  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+	// cond: d >= c
+	// result: (Leq16U (Sub16  x (Const16  [c])) (Const16  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+	// cond: d >= c
+	// result: (Less8U (Sub8  x (Const8  [c])) (Const8  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+	// cond: d >= c
+	// result: (Leq8U (Sub8  x (Const8  [c])) (Const8  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+	// cond: d >= c+1 && int64(c+1) > int64(c)
+	// result: (Less64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int64(c+1) > int64(c)) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+	// cond: d >= c+1 && int64(c+1) > int64(c)
+	// result: (Leq64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int64(c+1) > int64(c)) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+	// cond: d >= c+1 && int32(c+1) > int32(c)
+	// result: (Less32U (Sub32  x (Const32  [c+1])) (Const32  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int32(c+1) > int32(c)) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+	// cond: d >= c+1 && int32(c+1) > int32(c)
+	// result: (Leq32U (Sub32  x (Const32  [c+1])) (Const32  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int32(c+1) > int32(c)) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+	// cond: d >= c+1 && int16(c+1) > int16(c)
+	// result: (Less16U (Sub16  x (Const16  [c+1])) (Const16  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int16(c+1) > int16(c)) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+	// cond: d >= c+1 && int16(c+1) > int16(c)
+	// result: (Leq16U (Sub16  x (Const16  [c+1])) (Const16  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int16(c+1) > int16(c)) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+	// cond: d >= c+1 && int8(c+1) > int8(c)
+	// result: (Less8U (Sub8  x (Const8  [c+1])) (Const8  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int8(c+1) > int8(c)) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+	// cond: d >= c+1 && int8(c+1) > int8(c)
+	// result: (Leq8U (Sub8  x (Const8  [c+1])) (Const8  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(d >= c+1 && int8(c+1) > int8(c)) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+	// cond: uint64(d) >= uint64(c)
+	// result: (Less64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(d) >= uint64(c)) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+	// cond: uint64(d) >= uint64(c)
+	// result: (Leq64U (Sub64  x (Const64  [c])) (Const64  [d-c]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(d) >= uint64(c)) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+	// cond: uint32(d) >= uint32(c)
+	// result: (Less32U (Sub32  x (Const32  [c])) (Const32  [int64(int32(d-c))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(d) >= uint32(c)) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = int64(int32(d - c))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+	// cond: uint32(d) >= uint32(c)
+	// result: (Leq32U (Sub32  x (Const32  [c])) (Const32  [int64(int32(d-c))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(d) >= uint32(c)) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = int64(int32(d - c))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+	// cond: uint16(d) >= uint16(c)
+	// result: (Less16U (Sub16  x (Const16  [c])) (Const16  [int64(int16(d-c))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(d) >= uint16(c)) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = int64(int16(d - c))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+	// cond: uint16(d) >= uint16(c)
+	// result: (Leq16U (Sub16  x (Const16  [c])) (Const16  [int64(int16(d-c))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(d) >= uint16(c)) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = int64(int16(d - c))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+	// cond: uint8(d) >= uint8(c)
+	// result: (Less8U (Sub8  x (Const8  [c])) (Const8  [int64(int8(d-c))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(d) >= uint8(c)) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = int64(int8(d - c))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+	// cond: uint8(d) >= uint8(c)
+	// result: (Leq8U (Sub8  x (Const8  [c])) (Const8  [int64(int8(d-c))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(d) >= uint8(c)) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = c
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = int64(int8(d - c))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+	// cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+	// result: (Less64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+	// cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+	// result: (Leq64U (Sub64  x (Const64  [c+1])) (Const64  [d-c-1]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v1.AuxInt = c + 1
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d - c - 1
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+	// cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+	// result: (Less32U (Sub32  x (Const32  [int64(int32(c+1))])) (Const32  [int64(int32(d-c-1))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = int64(int32(c + 1))
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = int64(int32(d - c - 1))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+	// cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+	// result: (Leq32U (Sub32  x (Const32  [int64(int32(c+1))])) (Const32  [int64(int32(d-c-1))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v1.AuxInt = int64(int32(c + 1))
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = int64(int32(d - c - 1))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+	// cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+	// result: (Less16U (Sub16  x (Const16  [int64(int16(c+1))])) (Const16  [int64(int16(d-c-1))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = int64(int16(c + 1))
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = int64(int16(d - c - 1))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+	// cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+	// result: (Leq16U (Sub16  x (Const16  [int64(int16(c+1))])) (Const16  [int64(int16(d-c-1))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v1.AuxInt = int64(int16(c + 1))
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = int64(int16(d - c - 1))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+	// cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+	// result: (Less8U (Sub8  x (Const8  [int64(int8(c+1))])) (Const8  [int64(int8(d-c-1))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = int64(int8(c + 1))
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = int64(int8(d - c - 1))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	// match: (AndB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+	// cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+	// result: (Leq8U (Sub8  x (Const8  [int64(int8(c+1))])) (Const8  [int64(int8(d-c-1))]))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v1.AuxInt = int64(int8(c + 1))
+			v0.AddArg2(x, v1)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = int64(int8(d - c - 1))
+			v.AddArg2(v0, v2)
+			return true
+		}
+		break
+	}
+	return false
+}
 func rewriteValuegeneric_OpArraySelect(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (ArraySelect (ArrayMake1 x))
@@ -15824,6 +17146,1324 @@ func rewriteValuegeneric_OpOr8(v *Value) bool {
 	}
 	return false
 }
+func rewriteValuegeneric_OpOrB(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+	// cond: c >= d
+	// result: (Less64U (Const64  [c-d]) (Sub64  x (Const64  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+	// cond: c >= d
+	// result: (Leq64U (Const64  [c-d]) (Sub64  x (Const64  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+	// cond: c >= d
+	// result: (Less32U (Const32  [c-d]) (Sub32  x (Const32  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+	// cond: c >= d
+	// result: (Leq32U (Const32  [c-d]) (Sub32  x (Const32  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+	// cond: c >= d
+	// result: (Less16U (Const16  [c-d]) (Sub16  x (Const16  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+	// cond: c >= d
+	// result: (Leq16U (Const16  [c-d]) (Sub16  x (Const16  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+	// cond: c >= d
+	// result: (Less8U (Const8  [c-d]) (Sub8  x (Const8  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+	// cond: c >= d
+	// result: (Leq8U (Const8  [c-d]) (Sub8  x (Const8  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+	// cond: c >= d+1 && int64(d+1) > int64(d)
+	// result: (Less64U (Const64  [c-d-1]) (Sub64  x (Const64  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int64(d+1) > int64(d)) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+	// cond: c >= d+1 && int64(d+1) > int64(d)
+	// result: (Leq64U (Const64  [c-d-1]) (Sub64  x (Const64  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int64(d+1) > int64(d)) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+	// cond: c >= d+1 && int32(d+1) > int32(d)
+	// result: (Less32U (Const32  [c-d-1]) (Sub32  x (Const32  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int32(d+1) > int32(d)) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+	// cond: c >= d+1 && int32(d+1) > int32(d)
+	// result: (Leq32U (Const32  [c-d-1]) (Sub32  x (Const32  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int32(d+1) > int32(d)) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+	// cond: c >= d+1 && int16(d+1) > int16(d)
+	// result: (Less16U (Const16  [c-d-1]) (Sub16  x (Const16  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int16(d+1) > int16(d)) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+	// cond: c >= d+1 && int16(d+1) > int16(d)
+	// result: (Leq16U (Const16  [c-d-1]) (Sub16  x (Const16  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int16(d+1) > int16(d)) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+	// cond: c >= d+1 && int8(d+1) > int8(d)
+	// result: (Less8U (Const8  [c-d-1]) (Sub8  x (Const8  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int8(d+1) > int8(d)) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+	// cond: c >= d+1 && int8(d+1) > int8(d)
+	// result: (Leq8U (Const8  [c-d-1]) (Sub8  x (Const8  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8 {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8 {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(c >= d+1 && int8(d+1) > int8(d)) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+	// cond: uint64(c) >= uint64(d)
+	// result: (Less64U (Const64  [c-d]) (Sub64  x (Const64  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(c) >= uint64(d)) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+	// cond: uint64(c) >= uint64(d)
+	// result: (Leq64U (Const64  [c-d]) (Sub64  x (Const64  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(c) >= uint64(d)) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+	// cond: uint32(c) >= uint32(d)
+	// result: (Less32U (Const32  [int64(int32(c-d))]) (Sub32  x (Const32  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(c) >= uint32(d)) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = int64(int32(c - d))
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+	// cond: uint32(c) >= uint32(d)
+	// result: (Leq32U (Const32  [int64(int32(c-d))]) (Sub32  x (Const32  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(c) >= uint32(d)) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = int64(int32(c - d))
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+	// cond: uint16(c) >= uint16(d)
+	// result: (Less16U (Const16  [int64(int16(c-d))]) (Sub16  x (Const16  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(c) >= uint16(d)) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = int64(int16(c - d))
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+	// cond: uint16(c) >= uint16(d)
+	// result: (Leq16U (Const16  [int64(int16(c-d))]) (Sub16  x (Const16  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(c) >= uint16(d)) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = int64(int16(c - d))
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+	// cond: uint8(c) >= uint8(d)
+	// result: (Less8U (Const8  [int64( int8(c-d))]) (Sub8  x (Const8  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(c) >= uint8(d)) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = int64(int8(c - d))
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+	// cond: uint8(c) >= uint8(d)
+	// result: (Leq8U (Const8  [int64( int8(c-d))]) (Sub8  x (Const8  [d])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLess8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(c) >= uint8(d)) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = int64(int8(c - d))
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = d
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+	// cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+	// result: (Less64U (Const64  [c-d-1]) (Sub64  x (Const64  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+				continue
+			}
+			v.reset(OpLess64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+	// cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+	// result: (Leq64U (Const64  [c-d-1]) (Sub64  x (Const64  [d+1])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq64U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst64 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq64U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst64 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+				continue
+			}
+			v.reset(OpLeq64U)
+			v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v0.AuxInt = c - d - 1
+			v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+			v2.AuxInt = d + 1
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+	// cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+	// result: (Less32U (Const32  [int64(int32(c-d-1))]) (Sub32  x (Const32  [int64(int32(d+1))])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+				continue
+			}
+			v.reset(OpLess32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = int64(int32(c - d - 1))
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = int64(int32(d + 1))
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+	// cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+	// result: (Leq32U (Const32  [int64(int32(c-d-1))]) (Sub32  x (Const32  [int64(int32(d+1))])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq32U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst32 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq32U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst32 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+				continue
+			}
+			v.reset(OpLeq32U)
+			v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v0.AuxInt = int64(int32(c - d - 1))
+			v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+			v2.AuxInt = int64(int32(d + 1))
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+	// cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+	// result: (Less16U (Const16  [int64(int16(c-d-1))]) (Sub16  x (Const16  [int64(int16(d+1))])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+				continue
+			}
+			v.reset(OpLess16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = int64(int16(c - d - 1))
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = int64(int16(d + 1))
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+	// cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+	// result: (Leq16U (Const16  [int64(int16(c-d-1))]) (Sub16  x (Const16  [int64(int16(d+1))])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq16U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst16 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq16U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst16 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+				continue
+			}
+			v.reset(OpLeq16U)
+			v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v0.AuxInt = int64(int16(c - d - 1))
+			v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+			v2.AuxInt = int64(int16(d + 1))
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+	// cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+	// result: (Less8U (Const8  [int64( int8(c-d-1))]) (Sub8  x (Const8  [int64( int8(d+1))])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLess8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+				continue
+			}
+			v.reset(OpLess8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = int64(int8(c - d - 1))
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = int64(int8(d + 1))
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	// match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+	// cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+	// result: (Leq8U (Const8  [int64( int8(c-d-1))]) (Sub8  x (Const8  [int64( int8(d+1))])))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpLeq8U {
+				continue
+			}
+			x := v_0.Args[1]
+			v_0_0 := v_0.Args[0]
+			if v_0_0.Op != OpConst8 {
+				continue
+			}
+			c := v_0_0.AuxInt
+			if v_1.Op != OpLeq8U {
+				continue
+			}
+			_ = v_1.Args[1]
+			if x != v_1.Args[0] {
+				continue
+			}
+			v_1_1 := v_1.Args[1]
+			if v_1_1.Op != OpConst8 {
+				continue
+			}
+			d := v_1_1.AuxInt
+			if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+				continue
+			}
+			v.reset(OpLeq8U)
+			v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v0.AuxInt = int64(int8(c - d - 1))
+			v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+			v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+			v2.AuxInt = int64(int8(d + 1))
+			v1.AddArg2(x, v2)
+			v.AddArg2(v0, v1)
+			return true
+		}
+		break
+	}
+	return false
+}
 func rewriteValuegeneric_OpPhi(v *Value) bool {
 	// match: (Phi (Const8 [c]) (Const8 [c]))
 	// result: (Const8 [c])
diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go
new file mode 100644
index 0000000000..79dd337dee
--- /dev/null
+++ b/test/codegen/fuse.go
@@ -0,0 +1,197 @@
+// asmcheck
+
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codegen
+
+// Notes:
+// - these examples use channels to provide a source of
+//   unknown values that cannot be optimized away
+// - these examples use for loops to force branches
+//   backward (predicted taken)
+
+// ---------------------------------- //
+// signed integer range (conjunction) //
+// ---------------------------------- //
+
+func si1c(c <-chan int64) {
+	// amd64:"CMPQ\t.+, [$]256"
+	// s390x:"CLGIJ\t[$]12, R[0-9]+, [$]255"
+	for x := <-c; x >= 0 && x < 256; x = <-c {
+	}
+}
+
+func si2c(c <-chan int32) {
+	// amd64:"CMPL\t.+, [$]256"
+	// s390x:"CLIJ\t[$]12, R[0-9]+, [$]255"
+	for x := <-c; x >= 0 && x < 256; x = <-c {
+	}
+}
+
+func si3c(c <-chan int16) {
+	// amd64:"CMPW\t.+, [$]256"
+	// s390x:"CLIJ\t[$]12, R[0-9]+, [$]255"
+	for x := <-c; x >= 0 && x < 256; x = <-c {
+	}
+}
+
+func si4c(c <-chan int8) {
+	// amd64:"CMPB\t.+, [$]10"
+	// s390x:"CLIJ\t[$]4, R[0-9]+, [$]10"
+	for x := <-c; x >= 0 && x < 10; x = <-c {
+	}
+}
+
+func si5c(c <-chan int64) {
+	// amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5,"
+	// s390x:"CLGIJ\t[$]4, R[0-9]+, [$]251","ADD\t[$]-5,"
+	for x := <-c; x < 256 && x > 4; x = <-c {
+	}
+}
+
+func si6c(c <-chan int32) {
+	// amd64:"CMPL\t.+, [$]255","DECL\t"
+	// s390x:"CLIJ\t[$]12, R[0-9]+, [$]255","ADDW\t[$]-1,"
+	for x := <-c; x > 0 && x <= 256; x = <-c {
+	}
+}
+
+func si7c(c <-chan int16) {
+	// amd64:"CMPW\t.+, [$]60","ADDL\t[$]10,"
+	// s390x:"CLIJ\t[$]12, R[0-9]+, [$]60","ADDW\t[$]10,"
+	for x := <-c; x >= -10 && x <= 50; x = <-c {
+	}
+}
+
+func si8c(c <-chan int8) {
+	// amd64:"CMPB\t.+, [$]126","ADDL\t[$]126,"
+	// s390x:"CLIJ\t[$]4, R[0-9]+, [$]126","ADDW\t[$]126,"
+	for x := <-c; x >= -126 && x < 0; x = <-c {
+	}
+}
+
+// ---------------------------------- //
+// signed integer range (disjunction) //
+// ---------------------------------- //
+
+func si1d(c <-chan int64) {
+	// amd64:"CMPQ\t.+, [$]256"
+	// s390x:"CLGIJ\t[$]2, R[0-9]+, [$]255"
+	for x := <-c; x < 0 || x >= 256; x = <-c {
+	}
+}
+
+func si2d(c <-chan int32) {
+	// amd64:"CMPL\t.+, [$]256"
+	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]255"
+	for x := <-c; x < 0 || x >= 256; x = <-c {
+	}
+}
+
+func si3d(c <-chan int16) {
+	// amd64:"CMPW\t.+, [$]256"
+	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]255"
+	for x := <-c; x < 0 || x >= 256; x = <-c {
+	}
+}
+
+func si4d(c <-chan int8) {
+	// amd64:"CMPB\t.+, [$]10"
+	// s390x:"CLIJ\t[$]10, R[0-9]+, [$]10"
+	for x := <-c; x < 0 || x >= 10; x = <-c {
+	}
+}
+
+func si5d(c <-chan int64) {
+	// amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5,"
+	// s390x:"CLGIJ\t[$]10, R[0-9]+, [$]251","ADD\t[$]-5,"
+	for x := <-c; x >= 256 || x <= 4; x = <-c {
+	}
+}
+
+func si6d(c <-chan int32) {
+	// amd64:"CMPL\t.+, [$]255","DECL\t"
+	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]255","ADDW\t[$]-1,"
+	for x := <-c; x <= 0 || x > 256; x = <-c {
+	}
+}
+
+func si7d(c <-chan int16) {
+	// amd64:"CMPW\t.+, [$]60","ADDL\t[$]10,"
+	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]60","ADDW\t[$]10,"
+	for x := <-c; x < -10 || x > 50; x = <-c {
+	}
+}
+
+func si8d(c <-chan int8) {
+	// amd64:"CMPB\t.+, [$]126","ADDL\t[$]126,"
+	// s390x:"CLIJ\t[$]10, R[0-9]+, [$]126","ADDW\t[$]126,"
+	for x := <-c; x < -126 || x >= 0; x = <-c {
+	}
+}
+
+// ------------------------------------ //
+// unsigned integer range (conjunction) //
+// ------------------------------------ //
+
+func ui1c(c <-chan uint64) {
+	// amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5,"
+	// s390x:"CLGIJ\t[$]4, R[0-9]+, [$]251","ADD\t[$]-5,"
+	for x := <-c; x < 256 && x > 4; x = <-c {
+	}
+}
+
+func ui2c(c <-chan uint32) {
+	// amd64:"CMPL\t.+, [$]255","DECL\t"
+	// s390x:"CLIJ\t[$]12, R[0-9]+, [$]255","ADDW\t[$]-1,"
+	for x := <-c; x > 0 && x <= 256; x = <-c {
+	}
+}
+
+func ui3c(c <-chan uint16) {
+	// amd64:"CMPW\t.+, [$]40","ADDL\t[$]-10,"
+	// s390x:"CLIJ\t[$]12, R[0-9]+, [$]40","ADDW\t[$]-10,"
+	for x := <-c; x >= 10 && x <= 50; x = <-c {
+	}
+}
+
+func ui4c(c <-chan uint8) {
+	// amd64:"CMPB\t.+, [$]2","ADDL\t[$]-126,"
+	// s390x:"CLIJ\t[$]4, R[0-9]+, [$]2","ADDW\t[$]-126,"
+	for x := <-c; x >= 126 && x < 128; x = <-c {
+	}
+}
+
+// ------------------------------------ //
+// unsigned integer range (disjunction) //
+// ------------------------------------ //
+
+func ui1d(c <-chan uint64) {
+	// amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5,"
+	// s390x:"CLGIJ\t[$]10, R[0-9]+, [$]251","ADD\t[$]-5,"
+	for x := <-c; x >= 256 || x <= 4; x = <-c {
+	}
+}
+
+func ui2d(c <-chan uint32) {
+	// amd64:"CMPL\t.+, [$]254","ADDL\t[$]-2,"
+	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]254","ADDW\t[$]-2,"
+	for x := <-c; x <= 1 || x > 256; x = <-c {
+	}
+}
+
+func ui3d(c <-chan uint16) {
+	// amd64:"CMPW\t.+, [$]40","ADDL\t[$]-10,"
+	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]40","ADDW\t[$]-10,"
+	for x := <-c; x < 10 || x > 50; x = <-c {
+	}
+}
+
+func ui4d(c <-chan uint8) {
+	// amd64:"CMPB\t.+, [$]2","ADDL\t[$]-126,"
+	// s390x:"CLIJ\t[$]10, R[0-9]+, [$]2","ADDW\t[$]-126,"
+	for x := <-c; x < 126 || x >= 128; x = <-c {
+	}
+}

From 96acb74655531794d6f563242736d25f4e716b11 Mon Sep 17 00:00:00 2001
From: Tim Cooper 
Date: Tue, 3 Mar 2020 07:08:06 -0600
Subject: [PATCH 47/69] encoding/hex: remove unused variable from BenchmarkDump

Change-Id: I1fd47e5eab27346cec488098d4f6102a0749bd28
Reviewed-on: https://go-review.googlesource.com/c/go/+/221788
Run-TryBot: Ian Lance Taylor 
TryBot-Result: Gobot Gobot 
Reviewed-by: Ian Lance Taylor 
---
 src/encoding/hex/hex_test.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/encoding/hex/hex_test.go b/src/encoding/hex/hex_test.go
index dbb00b94ca..31e3f68936 100644
--- a/src/encoding/hex/hex_test.go
+++ b/src/encoding/hex/hex_test.go
@@ -267,7 +267,6 @@ func BenchmarkDecode(b *testing.B) {
 func BenchmarkDump(b *testing.B) {
 	for _, size := range []int{256, 1024, 4096, 16384} {
 		src := bytes.Repeat([]byte{2, 3, 5, 7, 9, 11, 13, 17}, size/8)
-		sink = make([]byte, 2*size)
 
 		b.Run(fmt.Sprintf("%v", size), func(b *testing.B) {
 			b.SetBytes(int64(size))

From ea1437a8cdf6bb3c2d2447833a5d06dbd75f7ae4 Mon Sep 17 00:00:00 2001
From: Russ Cox 
Date: Tue, 25 Feb 2020 13:01:59 -0500
Subject: [PATCH 48/69] net/http: fix handling of HTTP/2 upgrade failures

If an error occurs during the HTTP/2 upgrade phase, originally this
resulted in a pconn with pconn.alt set to an http2erringRoundTripper,
which always fails. This is not wanted - we want to retry in this case.

CL 202078 added a check for the http2erringRoundTripper to treat it
as a failed pconn, but the handling of the failure was wrong in the case
where the pconn is not in the idle list at all (common in HTTP/2).
This made the added test TestDontCacheBrokenHTTP2Conn flaky.

CL 218097 (unsubmitted) proposed to expand the handling of the
http2erringRoundTripper after the new check, to dispose of the pconn
more thoroughly. Bryan Mills pointed out in that review that we probably
shouldn't make the never-going-to-work pconn in the first place.

This CL changes the upgrade phase to look for the http2erringRoundTripper
and return the underlying error instead of claiming to have a working
connection. Having done that, the CL undoes the change in CL 202078
and with it the need for CL 218097, but it keeps the new test added
by CL 202078.

On my laptop, before this commit, TestDontCacheBrokenHTTP2Conn
failed 66 times out of 20,000. With this commit, I see 0 out of 20,000.

Fixes #34978.
Fixes #35113.

Change-Id: Ibd908b63c2ae96e159e8e604213d8373afb350e3
Reviewed-on: https://go-review.googlesource.com/c/go/+/220905
Reviewed-by: Bryan C. Mills 
Run-TryBot: Russ Cox 
TryBot-Result: Gobot Gobot 
---
 src/net/http/omithttp2.go |  2 +-
 src/net/http/transport.go | 14 ++++++++------
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/src/net/http/omithttp2.go b/src/net/http/omithttp2.go
index 307d93a3b1..7e2f492579 100644
--- a/src/net/http/omithttp2.go
+++ b/src/net/http/omithttp2.go
@@ -32,7 +32,7 @@ type http2Transport struct {
 func (*http2Transport) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) }
 func (*http2Transport) CloseIdleConnections()                 {}
 
-type http2erringRoundTripper struct{}
+type http2erringRoundTripper struct{ err error }
 
 func (http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) }
 
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index d0bfdb412c..15feeaf41f 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -569,14 +569,11 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
 		}
 
 		// Failed. Clean up and determine whether to retry.
-
-		_, isH2DialError := pconn.alt.(http2erringRoundTripper)
-		if http2isNoCachedConnError(err) || isH2DialError {
+		if http2isNoCachedConnError(err) {
 			if t.removeIdleConn(pconn) {
 				t.decConnsPerHost(pconn.cacheKey)
 			}
-		}
-		if !pconn.shouldRetryRequest(req, err) {
+		} else if !pconn.shouldRetryRequest(req, err) {
 			// Issue 16465: return underlying net.Conn.Read error from peek,
 			// as we've historically done.
 			if e, ok := err.(transportReadFromServerError); ok {
@@ -1637,7 +1634,12 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers
 
 	if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" {
 		if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok {
-			return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: next(cm.targetAddr, pconn.conn.(*tls.Conn))}, nil
+			alt := next(cm.targetAddr, pconn.conn.(*tls.Conn))
+			if e, ok := alt.(http2erringRoundTripper); ok {
+				// pconn.conn was closed by next (http2configureTransport.upgradeFn).
+				return nil, e.err
+			}
+			return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil
 		}
 	}
 

From b49d8ce2fa66df6e201a3e7e89c42003e7b7a76a Mon Sep 17 00:00:00 2001
From: Josh Bleecher Snyder 
Date: Tue, 9 May 2017 12:48:23 -0700
Subject: [PATCH 49/69] all: fix two minor typos in comments

Change-Id: Iec6cd81c9787d3419850aa97e75052956ad139bc
Reviewed-on: https://go-review.googlesource.com/c/go/+/221789
Reviewed-by: Emmanuel Odeke 
---
 src/cmd/compile/internal/x86/387.go | 2 +-
 test/codegen/README                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
index 18838fb4ca..796aa82f19 100644
--- a/src/cmd/compile/internal/x86/387.go
+++ b/src/cmd/compile/internal/x86/387.go
@@ -326,7 +326,7 @@ func push(s *gc.SSAGenState, v *ssa.Value) {
 }
 
 // popAndSave pops a value off of the floating-point stack and stores
-// it in the reigster assigned to v.
+// it in the register assigned to v.
 func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
 	r := v.Reg()
 	if _, ok := s.SSEto387[r]; ok {
diff --git a/test/codegen/README b/test/codegen/README
index f6877e701d..d6b8cf5b32 100644
--- a/test/codegen/README
+++ b/test/codegen/README
@@ -125,7 +125,7 @@ As a general guideline, test functions should be small, to avoid
 possible interactions between unrelated lines of code that may be
 introduced, for example, by the compiler's optimization passes.
 
-Any given line of Go code could get assigned more instructions that it
+Any given line of Go code could get assigned more instructions than it
 may appear from reading the source. In particular, matching all MOV
 instructions should be avoided; the compiler may add them for
 unrelated reasons and this may render the test ineffective.

From 7b0b6c2f7e9d925763a2e1d2ba10682019827a9b Mon Sep 17 00:00:00 2001
From: Josh Bleecher Snyder 
Date: Mon, 2 Mar 2020 13:51:20 -0800
Subject: [PATCH 50/69] cmd/compile: simplify converted SSA form for 'if false'

The goal here is to make it easier for a human to
examine the SSA when a function contains lots of dead code.

No significant compiler metric or generated code differences.

Change-Id: I81915fa4639bc8820cc9a5e45e526687d0d1f57a
Reviewed-on: https://go-review.googlesource.com/c/go/+/221791
Run-TryBot: Josh Bleecher Snyder 
TryBot-Result: Gobot Gobot 
Reviewed-by: Keith Randall 
---
 src/cmd/compile/internal/gc/ssa.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index f2a472bde6..34adeabae1 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -1274,6 +1274,16 @@ func (s *state) stmt(n *Node) {
 		s.assign(n.Left, r, deref, skip)
 
 	case OIF:
+		if Isconst(n.Left, CTBOOL) {
+			s.stmtList(n.Left.Ninit)
+			if n.Left.Bool() {
+				s.stmtList(n.Nbody)
+			} else {
+				s.stmtList(n.Rlist)
+			}
+			break
+		}
+
 		bEnd := s.f.NewBlock(ssa.BlockPlain)
 		var likely int8
 		if n.Likely() {

From 24343cb88640ae1e7dbfc4ec2f3ae81fc0aa07c7 Mon Sep 17 00:00:00 2001
From: Michael Munday 
Date: Tue, 3 Mar 2020 12:44:19 +0000
Subject: [PATCH 51/69] cmd/compile: remove walkinrange optimization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The walkinrange optimization has been superseded by CL 165998.

Has a small positive impact on binary sizes:

compilecmp master -> HEAD
master (e37cc29863): cmd/compile: optimize integer-in-range checks
HEAD (1a70680a34): cmd/compile: remove walkinrange optimization
platform: linux/amd64

file      before    after     Δ       %
addr2line 4329144   4325048   -4096   -0.095%
api       6060970   6056874   -4096   -0.068%
asm       5196905   5192809   -4096   -0.079%
cgo       4898769   4890577   -8192   -0.167%
compile   20222193  20209713  -12480  -0.062%
cover     5331580   5323388   -8192   -0.154%
dist      3732778   3728682   -4096   -0.110%
doc       4748488   4740296   -8192   -0.173%
link      6707380   6695092   -12288  -0.183%
nm        4278685   4274589   -4096   -0.096%
pack      2305038   2300942   -4096   -0.178%
pprof     14874834  14870738  -4096   -0.028%
test2json 2849221   2845125   -4096   -0.144%
vet       8393173   8384981   -8192   -0.098%
go        15205572  15193284  -12288  -0.081%
total     131812292 131709700 -102592 -0.078%

Updates #30645.

Change-Id: I42d74481652c90fef1a9bc58c70836e42c9b1c4b
Reviewed-on: https://go-review.googlesource.com/c/go/+/221802
Run-TryBot: Michael Munday 
TryBot-Result: Gobot Gobot 
Reviewed-by: Josh Bleecher Snyder 
---
 src/cmd/compile/internal/gc/walk.go | 128 ----------------------------
 1 file changed, 128 deletions(-)

diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 9298d7b783..d468f241f9 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -565,7 +565,6 @@ opswitch:
 
 		n.Right = walkexpr(n.Right, &ll)
 		n.Right = addinit(n.Right, ll.Slice())
-		n = walkinrange(n, init)
 
 	case OPRINT, OPRINTN:
 		n = walkprint(n, init)
@@ -3523,133 +3522,6 @@ func (n *Node) isIntOrdering() bool {
 	return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
 }
 
-// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
-// n must be an OANDAND or OOROR node.
-// The result of walkinrange MUST be assigned back to n, e.g.
-// 	n.Left = walkinrange(n.Left)
-func walkinrange(n *Node, init *Nodes) *Node {
-	// We are looking for something equivalent to a opl b OP b opr c, where:
-	// * a, b, and c have integer type
-	// * b is side-effect-free
-	// * opl and opr are each < or ≤
-	// * OP is &&
-	l := n.Left
-	r := n.Right
-	if !l.isIntOrdering() || !r.isIntOrdering() {
-		return n
-	}
-
-	// Find b, if it exists, and rename appropriately.
-	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
-	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
-	a, opl, b := l.Left, l.Op, l.Right
-	x, opr, c := r.Left, r.Op, r.Right
-	for i := 0; ; i++ {
-		if samesafeexpr(b, x) {
-			break
-		}
-		if i == 3 {
-			// Tried all permutations and couldn't find an appropriate b == x.
-			return n
-		}
-		if i&1 == 0 {
-			a, opl, b = b, brrev(opl), a
-		} else {
-			x, opr, c = c, brrev(opr), x
-		}
-	}
-
-	// If n.Op is ||, apply de Morgan.
-	// Negate the internal ops now; we'll negate the top level op at the end.
-	// Henceforth assume &&.
-	negateResult := n.Op == OOROR
-	if negateResult {
-		opl = brcom(opl)
-		opr = brcom(opr)
-	}
-
-	cmpdir := func(o Op) int {
-		switch o {
-		case OLE, OLT:
-			return -1
-		case OGE, OGT:
-			return +1
-		}
-		Fatalf("walkinrange cmpdir %v", o)
-		return 0
-	}
-	if cmpdir(opl) != cmpdir(opr) {
-		// Not a range check; something like b < a && b < c.
-		return n
-	}
-
-	switch opl {
-	case OGE, OGT:
-		// We have something like a > b && b ≥ c.
-		// Switch and reverse ops and rename constants,
-		// to make it look like a ≤ b && b < c.
-		a, c = c, a
-		opl, opr = brrev(opr), brrev(opl)
-	}
-
-	// We must ensure that c-a is non-negative.
-	// For now, require a and c to be constants.
-	// In the future, we could also support a == 0 and c == len/cap(...).
-	// Unfortunately, by this point, most len/cap expressions have been
-	// stored into temporary variables.
-	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
-		return n
-	}
-
-	// Ensure that Int64() does not overflow on a and c (it'll happen
-	// for any const above 2**63; see issue #27143).
-	if !a.CanInt64() || !c.CanInt64() {
-		return n
-	}
-
-	if opl == OLT {
-		// We have a < b && ...
-		// We need a ≤ b && ... to safely use unsigned comparison tricks.
-		// If a is not the maximum constant for b's type,
-		// we can increment a and switch to ≤.
-		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
-			return n
-		}
-		a = nodintconst(a.Int64() + 1)
-		opl = OLE
-	}
-
-	bound := c.Int64() - a.Int64()
-	if bound < 0 {
-		// Bad news. Something like 5 <= x && x < 3.
-		// Rare in practice, and we still need to generate side-effects,
-		// so just leave it alone.
-		return n
-	}
-
-	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
-	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
-	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
-	// which is equivalent to uint(b-a) < uint(c-a).
-	ut := b.Type.ToUnsigned()
-	lhs := conv(nod(OSUB, b, a), ut)
-	rhs := nodintconst(bound)
-	if negateResult {
-		// Negate top level.
-		opr = brcom(opr)
-	}
-	cmp := nod(opr, lhs, rhs)
-	cmp.Pos = n.Pos
-	cmp = addinit(cmp, l.Ninit.Slice())
-	cmp = addinit(cmp, r.Ninit.Slice())
-	// Typecheck the AST rooted at cmp...
-	cmp = typecheck(cmp, ctxExpr)
-	// ...but then reset cmp's type to match n's type.
-	cmp.Type = n.Type
-	cmp = walkexpr(cmp, init)
-	return cmp
-}
-
 // return 1 if integer n must be in range [0, max), 0 otherwise
 func bounded(n *Node, max int64) bool {
 	if n.Type == nil || !n.Type.IsInteger() {

From cd9fd640db419ec81026945eb4f22bfe5ff5a27f Mon Sep 17 00:00:00 2001
From: Keith Randall 
Date: Tue, 3 Mar 2020 17:56:20 +0000
Subject: [PATCH 52/69] cmd/compile: don't allow NaNs in floating-point
 constant ops

Trying this CL again, with a fixed test that allows platforms
to disagree on the exact behavior of converting NaNs.

We store 32-bit floating point constants in a 64-bit field, by
converting that 32-bit float to 64-bit float to store it, and convert
it back to use it.

That works for *almost* all floating-point constants. The exception is
signaling NaNs. The round trip described above means we can't represent
a 32-bit signaling NaN, because conversions strip the signaling bit.

To fix this issue, just forbid NaNs as floating-point constants in SSA
form. This shouldn't affect any real-world code, as people seldom
constant-propagate NaNs (except in test code).

Additionally, NaNs are somewhat underspecified (which of the many NaNs
do you get when dividing 0/0?), so when cross-compiling there's a
danger of using the compiler machine's NaN regime for some math, and
the target machine's NaN regime for other math. Better to use the
target machine's NaN regime always.

Update #36400

Change-Id: Idf203b688a15abceabbd66ba290d4e9f63619ecb
Reviewed-on: https://go-review.googlesource.com/c/go/+/221790
Run-TryBot: Keith Randall 
TryBot-Result: Gobot Gobot 
Reviewed-by: Josh Bleecher Snyder 
---
 src/cmd/compile/internal/gc/float_test.go     | 58 +++++++++++++++++++
 src/cmd/compile/internal/ssa/check.go         | 10 +++-
 src/cmd/compile/internal/ssa/gen/PPC64.rules  |  2 +-
 src/cmd/compile/internal/ssa/gen/Wasm.rules   | 21 +++----
 .../compile/internal/ssa/gen/generic.rules    | 14 ++---
 .../compile/internal/ssa/gen/genericOps.go    |  7 ++-
 src/cmd/compile/internal/ssa/rewrite.go       |  6 ++
 src/cmd/compile/internal/ssa/rewritePPC64.go  |  4 ++
 src/cmd/compile/internal/ssa/rewriteWasm.go   | 41 +++++++++++++
 .../compile/internal/ssa/rewritegeneric.go    | 28 +++++++--
 test/codegen/math.go                          | 33 ++++++++++-
 11 files changed, 198 insertions(+), 26 deletions(-)

diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index c5c604003a..6ae363be22 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -483,6 +483,64 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
 	}
 }
 
+// Signaling NaN values as constants.
+const (
+	snan32bits uint32 = 0x7f800001
+	snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+	// Make sure we generate a signaling NaN from a constant properly.
+	// See issue 36400.
+	f32 := math.Float32frombits(snan32bits)
+	g32 := math.Float32frombits(snan32bitsVar)
+	x32 := math.Float32bits(f32)
+	y32 := math.Float32bits(g32)
+	if x32 != y32 {
+		t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+	}
+
+	f64 := math.Float64frombits(snan64bits)
+	g64 := math.Float64frombits(snan64bitsVar)
+	x64 := math.Float64bits(f64)
+	y64 := math.Float64bits(g64)
+	if x64 != y64 {
+		t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+	}
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+	// Test to make sure when we convert a signaling NaN, we get a NaN.
+	// (Ideally we want a quiet NaN, but some platforms don't agree.)
+	// See issue 36399.
+	s32 := math.Float32frombits(snan32bitsVar)
+	if s32 == s32 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+	s64 := math.Float64frombits(snan64bitsVar)
+	if s64 == s64 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+	// Test to make sure when we convert a signaling NaN, it converts to a NaN.
+	// (Ideally we want a quiet NaN, but some platforms don't agree.)
+	// See issue 36399 and 36400.
+	s32 := math.Float32frombits(snan32bits)
+	if s32 == s32 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+	s64 := math.Float64frombits(snan64bits)
+	if s64 == s64 {
+		t.Errorf("converting a NaN did not result in a NaN")
+	}
+}
+
 var sinkFloat float64
 
 func BenchmarkMul2(b *testing.B) {
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index a6746805f7..4c694a03ac 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -141,15 +141,23 @@ func checkFunc(f *Func) {
 					f.Fatalf("bad int32 AuxInt value for %v", v)
 				}
 				canHaveAuxInt = true
-			case auxInt64, auxFloat64, auxARM64BitField:
+			case auxInt64, auxARM64BitField:
 				canHaveAuxInt = true
 			case auxInt128:
 				// AuxInt must be zero, so leave canHaveAuxInt set to false.
 			case auxFloat32:
 				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 				if !isExactFloat32(v.AuxFloat()) {
 					f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
 				}
+			case auxFloat64:
+				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 			case auxString, auxSym, auxTyp, auxArchSpecific:
 				canHaveAux = true
 			case auxSymOff, auxSymValAndOff, auxTypSize:
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index f2b2b9b898..c53ec0fde1 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -80,7 +80,7 @@
 
 // Constant folding
 (FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
-(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
 (FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
 (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index cdcbc28c30..bf2b904baf 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -357,7 +357,7 @@
 (I64Or  (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
 (I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
 (F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
-(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 (I64Eq  (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
 (I64Eq  (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
 (I64Ne  (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
@@ -367,15 +367,16 @@
 (I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) >> uint64(y))])
 (I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)])
 
-(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
-(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
-(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
-(I64Or  (I64Const [x]) y) -> (I64Or  y (I64Const [x]))
-(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
-(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
-(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
-(I64Eq  (I64Const [x]) y) -> (I64Eq y  (I64Const [x]))
-(I64Ne  (I64Const [x]) y) -> (I64Ne y  (I64Const [x]))
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x]))
+(I64Or  (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or  y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x]))
+(I64Eq  (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y  (I64Const [x]))
+(I64Ne  (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y  (I64Const [x]))
 
 (I64Eq x (I64Const [0])) -> (I64Eqz x)
 (I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x))
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index bc16f5a7af..8ec22d86e7 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -119,8 +119,8 @@
 (Mul16  (Const16 [c])  (Const16 [d]))  -> (Const16 [int64(int16(c*d))])
 (Mul32  (Const32 [c])  (Const32 [d]))  -> (Const32 [int64(int32(c*d))])
 (Mul64  (Const64 [c])  (Const64 [d]))  -> (Const64 [c*d])
-(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
-(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
+(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
+(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 
 (And8   (Const8 [c])   (Const8 [d]))   -> (Const8  [int64(int8(c&d))])
 (And16  (Const16 [c])  (Const16 [d]))  -> (Const16 [int64(int16(c&d))])
@@ -145,8 +145,8 @@
 (Div16u (Const16 [c])  (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
 (Div32u (Const32 [c])  (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
 (Div64u (Const64 [c])  (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
-(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
-(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
+(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 (Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y)
 (Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y)
 
@@ -623,8 +623,8 @@
 	-> x
 
 // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
-(Load  p1 (Store {t2} p2 (Const64  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
-(Load  p1 (Store {t2} p2 (Const32  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
+        (Load  p1 (Store {t2} p2 (Const64  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x])
+        (Load  p1 (Store {t2} p2 (Const32  [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 (Load  p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1)   -> (Const64  [x])
 (Load  p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)   -> (Const32  [int64(int32(math.Float32bits(auxTo32F(x))))])
 
@@ -1893,7 +1893,7 @@
 (Div32F x (Const32F  [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F  [auxFrom32F(1/auxTo32F(c))]))
 (Div64F x (Const64F  [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F  [auxFrom64F(1/auxTo64F(c))]))
 
-(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 
 // recognize runtime.newobject and don't Zero/Nilcheck it
 (Zero (Load (OffPtr [c] (SP)) mem) mem)
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 54c6968c5b..b7e91a1f20 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -323,7 +323,12 @@ var genericOps = []opData{
 	{name: "Const32", aux: "Int32"},      // auxint is sign-extended 32 bits
 	// Note: ConstX are sign-extended even when the type of the value is unsigned.
 	// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
-	{name: "Const64", aux: "Int64"},    // value is auxint
+	{name: "Const64", aux: "Int64"}, // value is auxint
+	// Note: for both Const32F and Const64F, we disallow encoding NaNs.
+	// Signaling NaNs are tricky because if you do anything with them, they become quiet.
+	// Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+	// See issue 36399 and 36400.
+	// Encodings of +inf, -inf, and -0 are fine.
 	{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
 	{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
 	{name: "ConstInterface"},           // nil interface
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index fcbb76cf34..238e243096 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool {
 
 // auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
 func auxFrom64F(f float64) int64 {
+	if f != f {
+		panic("can't encode a NaN in AuxInt field")
+	}
 	return int64(math.Float64bits(f))
 }
 
 // auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
 func auxFrom32F(f float32) int64 {
+	if f != f {
+		panic("can't encode a NaN in AuxInt field")
+	}
 	return int64(math.Float64bits(extend32Fto64F(f)))
 }
 
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index fe15e71a3e..0094ba1b74 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -5191,12 +5191,16 @@ func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool {
 func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (FSQRT (FMOVDconst [x]))
+	// cond: auxTo64F(x) >= 0
 	// result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 	for {
 		if v_0.Op != OpPPC64FMOVDconst {
 			break
 		}
 		x := v_0.AuxInt
+		if !(auxTo64F(x) >= 0) {
+			break
+		}
 		v.reset(OpPPC64FMOVDconst)
 		v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x)))
 		return true
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index 2c7add4996..be1b51e7aa 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -3,6 +3,7 @@
 
 package ssa
 
+import "math"
 import "cmd/internal/objabi"
 import "cmd/compile/internal/types"
 
@@ -3467,6 +3468,7 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
 		return true
 	}
 	// match: (F64Add (F64Const [x]) y)
+	// cond: y.Op != OpWasmF64Const
 	// result: (F64Add y (F64Const [x]))
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -3474,6 +3476,9 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmF64Const) {
+			break
+		}
 		v.reset(OpWasmF64Add)
 		v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
 		v0.AuxInt = x
@@ -3488,6 +3493,7 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (F64Mul (F64Const [x]) (F64Const [y]))
+	// cond: !math.IsNaN(auxTo64F(x) * auxTo64F(y))
 	// result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -3498,11 +3504,15 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 			break
 		}
 		y := v_1.AuxInt
+		if !(!math.IsNaN(auxTo64F(x) * auxTo64F(y))) {
+			break
+		}
 		v.reset(OpWasmF64Const)
 		v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y))
 		return true
 	}
 	// match: (F64Mul (F64Const [x]) y)
+	// cond: y.Op != OpWasmF64Const
 	// result: (F64Mul y (F64Const [x]))
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -3510,6 +3520,9 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmF64Const) {
+			break
+		}
 		v.reset(OpWasmF64Mul)
 		v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
 		v0.AuxInt = x
@@ -3539,6 +3552,7 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		return true
 	}
 	// match: (I64Add (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Add y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -3546,6 +3560,9 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Add)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
@@ -3622,6 +3639,7 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool {
 		return true
 	}
 	// match: (I64And (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64And y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -3629,6 +3647,9 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64And)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
@@ -3681,6 +3702,7 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
 		return true
 	}
 	// match: (I64Eq (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Eq y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -3688,6 +3710,9 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Eq)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
@@ -3993,6 +4018,7 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
 		return true
 	}
 	// match: (I64Mul (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Mul y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4000,6 +4026,9 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Mul)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
@@ -4052,6 +4081,7 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
 		return true
 	}
 	// match: (I64Ne (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Ne y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4059,6 +4089,9 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Ne)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
@@ -4101,6 +4134,7 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
 		return true
 	}
 	// match: (I64Or (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Or y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4108,6 +4142,9 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Or)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
@@ -4301,6 +4338,7 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
 		return true
 	}
 	// match: (I64Xor (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Xor y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4308,6 +4346,9 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Xor)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
 		v0.AuxInt = x
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 9e743838ab..13873b2ac8 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -4734,6 +4734,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Div32F (Const32F [c]) (Const32F [d]))
+	// cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))
 	// result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
 	for {
 		if v_0.Op != OpConst32F {
@@ -4744,6 +4745,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
 			break
 		}
 		d := v_1.AuxInt
+		if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) {
+			break
+		}
 		v.reset(OpConst32F)
 		v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d))
 		return true
@@ -5171,6 +5175,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Div64F (Const64F [c]) (Const64F [d]))
+	// cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d))
 	// result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 	for {
 		if v_0.Op != OpConst64F {
@@ -5181,6 +5186,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
 			break
 		}
 		d := v_1.AuxInt
+		if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) {
+			break
+		}
 		v.reset(OpConst64F)
 		v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d))
 		return true
@@ -10240,7 +10248,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load  p1 (Store {t2} p2 (Const64 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1)
+	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
 	// result: (Const64F [x])
 	for {
 		t1 := v.Type
@@ -10256,7 +10264,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := v_1_1.AuxInt
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) {
+		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
 			break
 		}
 		v.reset(OpConst64F)
@@ -10264,7 +10272,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load  p1 (Store {t2} p2 (Const32 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1)
+	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
 	// result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 	for {
 		t1 := v.Type
@@ -10280,7 +10288,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := v_1_1.AuxInt
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) {
+		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
 			break
 		}
 		v.reset(OpConst32F)
@@ -13970,6 +13978,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (Mul32F (Const32F [c]) (Const32F [d]))
+	// cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))
 	// result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13981,6 +13990,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
 				continue
 			}
 			d := v_1.AuxInt
+			if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) {
+				continue
+			}
 			v.reset(OpConst32F)
 			v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d))
 			return true
@@ -14210,6 +14222,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (Mul64F (Const64F [c]) (Const64F [d]))
+	// cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d))
 	// result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14221,6 +14234,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
 				continue
 			}
 			d := v_1.AuxInt
+			if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) {
+				continue
+			}
 			v.reset(OpConst64F)
 			v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d))
 			return true
@@ -20966,12 +20982,16 @@ func rewriteValuegeneric_OpSlicemask(v *Value) bool {
 func rewriteValuegeneric_OpSqrt(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Sqrt (Const64F [c]))
+	// cond: !math.IsNaN(math.Sqrt(auxTo64F(c)))
 	// result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := v_0.AuxInt
+		if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) {
+			break
+		}
 		v.reset(OpConst64F)
 		v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c)))
 		return true
diff --git a/test/codegen/math.go b/test/codegen/math.go
index 80e5d60d96..1ebfda0405 100644
--- a/test/codegen/math.go
+++ b/test/codegen/math.go
@@ -151,13 +151,13 @@ func toFloat32(u32 uint32) float32 {
 func constantCheck64() bool {
 	// amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1"
 	// s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1,"
-	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) || math.NaN() == math.NaN()
+	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63))
 }
 
 func constantCheck32() bool {
 	// amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0"
 	// s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0,"
-	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) && float32(math.NaN()) != float32(math.NaN())
+	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))
 }
 
 // Test that integer constants are converted to floating point constants
@@ -186,3 +186,32 @@ func constantConvertInt32(x uint32) uint32 {
 	}
 	return x
 }
+
+func nanGenerate64() float64 {
+	// Test to make sure we don't generate a NaN while constant propagating.
+	// See issue 36400.
+	zero := 0.0
+	// amd64:-"DIVSD"
+	inf := 1 / zero // +inf. We can constant propagate this one.
+	negone := -1.0
+
+	// amd64:"DIVSD"
+	z0 := zero / zero
+	// amd64:"MULSD"
+	z1 := zero * inf
+	// amd64:"SQRTSD"
+	z2 := math.Sqrt(negone)
+	return z0 + z1 + z2
+}
+
+func nanGenerate32() float32 {
+	zero := float32(0.0)
+	// amd64:-"DIVSS"
+	inf := 1 / zero // +inf. We can constant propagate this one.
+
+	// amd64:"DIVSS"
+	z0 := zero / zero
+	// amd64:"MULSS"
+	z1 := zero * inf
+	return z0 + z1
+}

From cec08794ef325e84f141e1a7b4deca0bedaeab34 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Chigot?= 
Date: Tue, 3 Mar 2020 16:24:32 +0100
Subject: [PATCH 53/69] misc/cgo/test: fix sigaltstack test on AIX

Increase the size of the signal stack as the value given by SIGSTKSZ
is too small for the Go signal handler.

Fixes #37609

Change-Id: I56f1006bc69a2a9fb43f9e0da00061964290a690
Reviewed-on: https://go-review.googlesource.com/c/go/+/221804
Reviewed-by: Ian Lance Taylor 
Reviewed-by: Bryan C. Mills 
---
 misc/cgo/test/sigaltstack.go | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/misc/cgo/test/sigaltstack.go b/misc/cgo/test/sigaltstack.go
index 2c9b81ced7..7b3f4acbb7 100644
--- a/misc/cgo/test/sigaltstack.go
+++ b/misc/cgo/test/sigaltstack.go
@@ -14,15 +14,22 @@ package cgotest
 #include 
 #include 
 
+#ifdef _AIX
+// On AIX, SIGSTKSZ is too small to handle Go sighandler.
+#define CSIGSTKSZ 0x4000
+#else
+#define CSIGSTKSZ SIGSTKSZ
+#endif
+
 static stack_t oss;
-static char signalStack[SIGSTKSZ];
+static char signalStack[CSIGSTKSZ];
 
 static void changeSignalStack(void) {
 	stack_t ss;
 	memset(&ss, 0, sizeof ss);
 	ss.ss_sp = signalStack;
 	ss.ss_flags = 0;
-	ss.ss_size = SIGSTKSZ;
+	ss.ss_size = CSIGSTKSZ;
 	if (sigaltstack(&ss, &oss) < 0) {
 		perror("sigaltstack");
 		abort();

From 18a6fd44bb9de9399dba86a4c1ae1e56f967bcda Mon Sep 17 00:00:00 2001
From: erifan01 
Date: Thu, 21 Nov 2019 14:38:25 +0800
Subject: [PATCH 54/69] bytes, strings: moves indexRabinKarp function to
 internal/bytealg
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In order to facilitate optimization of IndexAny and LastIndexAny, this patch moves
three Rabin-Karp related functions indexRabinKarp, hashStr and hashStrRev in strings
package to internal/bytealg. There are also three functions in the bytes package with
the same names and functions but different parameter types. To highlight this, this
patch also moves them to internal/bytealg and gives them slightly different names.

Related benchmark changes on amd64 and arm64:

name          old time/op    new time/op    delta
pkg:strings goos:linux goarch:amd64
Index-16        14.0ns ± 1%    14.1ns ± 2%    ~     (p=0.738 n=5+5)
LastIndex-16    15.5ns ± 1%    15.7ns ± 4%    ~     (p=0.897 n=5+5)
pkg:bytes goos:linux goarch:amd64
Index/10-16     26.5ns ± 1%    26.5ns ± 0%    ~     (p=0.873 n=5+5)
Index/32-16     26.2ns ± 0%    25.7ns ± 0%  -1.68%  (p=0.008 n=5+5)
Index/4K-16     5.12µs ± 4%    5.14µs ± 2%    ~     (p=0.841 n=5+5)
Index/4M-16     5.44ms ± 3%    5.34ms ± 2%    ~     (p=0.056 n=5+5)
Index/64M-16    85.8ms ± 3%    84.6ms ± 0%  -1.37%  (p=0.016 n=5+5)

name          old speed      new speed      delta
pkg:bytes goos:linux goarch:amd64
Index/10-16    377MB/s ± 1%   377MB/s ± 0%    ~     (p=1.000 n=5+5)
Index/32-16   1.22GB/s ± 1%  1.24GB/s ± 0%  +1.66%  (p=0.008 n=5+5)
Index/4K-16    800MB/s ± 4%   797MB/s ± 2%    ~     (p=0.841 n=5+5)
Index/4M-16    771MB/s ± 3%   786MB/s ± 2%    ~     (p=0.056 n=5+5)
Index/64M-16   783MB/s ± 3%   793MB/s ± 0%  +1.36%  (p=0.016 n=5+5)

name         old time/op   new time/op   delta
pkg:strings goos:linux goarch:arm64
Index-8       22.6ns ± 0%   22.5ns ± 0%    ~     (p=0.167 n=5+5)
LastIndex-8   17.5ns ± 0%   17.5ns ± 0%    ~     (all equal)
pkg:bytes goos:linux goarch:arm64
Index/10-8    25.0ns ± 0%   25.0ns ± 0%    ~     (all equal)
Index/32-8     160ns ± 0%    160ns ± 0%    ~     (all equal)
Index/4K-8    6.26µs ± 0%   6.26µs ± 0%    ~     (p=0.167 n=5+5)
Index/4M-8    6.30ms ± 0%   6.31ms ± 0%    ~     (p=1.000 n=5+5)
Index/64M-8    101ms ± 0%    101ms ± 0%    ~     (p=0.690 n=5+5)

name         old speed     new speed     delta
pkg:bytes goos:linux goarch:arm64
Index/10-8   399MB/s ± 0%  400MB/s ± 0%  +0.08%  (p=0.008 n=5+5)
Index/32-8   200MB/s ± 0%  200MB/s ± 0%    ~     (p=0.127 n=4+5)
Index/4K-8   654MB/s ± 0%  654MB/s ± 0%  +0.01%  (p=0.016 n=5+5)
Index/4M-8   665MB/s ± 0%  665MB/s ± 0%    ~     (p=0.833 n=5+5)
Index/64M-8  665MB/s ± 0%  665MB/s ± 0%    ~     (p=0.913 n=5+5)

Change-Id: Icce3bc162bb8613ac36dc963a46c51f8e82ab842
Reviewed-on: https://go-review.googlesource.com/c/go/+/208638
Run-TryBot: eric fang 
TryBot-Result: Gobot Gobot 
Reviewed-by: Ian Lance Taylor 
---
 src/bytes/bytes.go              |  68 +----------------
 src/bytes/bytes_test.go         |  24 +++++-
 src/internal/bytealg/bytealg.go | 125 ++++++++++++++++++++++++++++++++
 src/strings/strings.go          |  68 +----------------
 4 files changed, 156 insertions(+), 129 deletions(-)

diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index e872cc2050..e7931387aa 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -117,17 +117,17 @@ func LastIndex(s, sep []byte) int {
 		return -1
 	}
 	// Rabin-Karp search from the end of the string
-	hashss, pow := hashStrRev(sep)
+	hashss, pow := bytealg.HashStrRevBytes(sep)
 	last := len(s) - n
 	var h uint32
 	for i := len(s) - 1; i >= last; i-- {
-		h = h*primeRK + uint32(s[i])
+		h = h*bytealg.PrimeRK + uint32(s[i])
 	}
 	if h == hashss && Equal(s[last:], sep) {
 		return last
 	}
 	for i := last - 1; i >= 0; i-- {
-		h *= primeRK
+		h *= bytealg.PrimeRK
 		h += uint32(s[i])
 		h -= pow * uint32(s[i+n])
 		if h == hashss && Equal(s[i:i+n], sep) {
@@ -1068,7 +1068,7 @@ func Index(s, sep []byte) int {
 			// we should cutover at even larger average skips,
 			// because Equal becomes that much more expensive.
 			// This code does not take that effect into account.
-			j := indexRabinKarp(s[i:], sep)
+			j := bytealg.IndexRabinKarpBytes(s[i:], sep)
 			if j < 0 {
 				return -1
 			}
@@ -1077,63 +1077,3 @@ func Index(s, sep []byte) int {
 	}
 	return -1
 }
-
-func indexRabinKarp(s, sep []byte) int {
-	// Rabin-Karp search
-	hashsep, pow := hashStr(sep)
-	n := len(sep)
-	var h uint32
-	for i := 0; i < n; i++ {
-		h = h*primeRK + uint32(s[i])
-	}
-	if h == hashsep && Equal(s[:n], sep) {
-		return 0
-	}
-	for i := n; i < len(s); {
-		h *= primeRK
-		h += uint32(s[i])
-		h -= pow * uint32(s[i-n])
-		i++
-		if h == hashsep && Equal(s[i-n:i], sep) {
-			return i - n
-		}
-	}
-	return -1
-}
-
-// primeRK is the prime base used in Rabin-Karp algorithm.
-const primeRK = 16777619
-
-// hashStr returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func hashStr(sep []byte) (uint32, uint32) {
-	hash := uint32(0)
-	for i := 0; i < len(sep); i++ {
-		hash = hash*primeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, primeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
-
-// hashStrRev returns the hash of the reverse of sep and the
-// appropriate multiplicative factor for use in Rabin-Karp algorithm.
-func hashStrRev(sep []byte) (uint32, uint32) {
-	hash := uint32(0)
-	for i := len(sep) - 1; i >= 0; i-- {
-		hash = hash*primeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, primeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index 2dbbb99f37..a208d4ed76 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -141,9 +141,10 @@ var indexTests = []BinOpTest{
 	{"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33},
 	{"foofyfoobarfoobar", "y", 4},
 	{"oooooooooooooooooooooo", "r", -1},
-	// test fallback to Rabin-Karp.
 	{"oxoxoxoxoxoxoxoxoxoxoxoy", "oy", 22},
 	{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
+	// test fallback to Rabin-Karp.
+	{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
 }
 
 var lastIndexTests = []BinOpTest{
@@ -209,6 +210,27 @@ func runIndexTests(t *testing.T, f func(s, sep []byte) int, funcName string, tes
 			t.Errorf("%s(%q,%q) = %v; want %v", funcName, a, b, actual, test.i)
 		}
 	}
+	var allocTests = []struct {
+		a []byte
+		b []byte
+		i int
+	}{
+		// case for function Index.
+		{[]byte("000000000000000000000000000000000000000000000000000000000000000000000001"), []byte("0000000000000000000000000000000000000000000000000000000000000000001"), 5},
+		// case for function LastIndex.
+		{[]byte("000000000000000000000000000000000000000000000000000000000000000010000"), []byte("00000000000000000000000000000000000000000000000000000000000001"), 3},
+	}
+	allocs := testing.AllocsPerRun(100, func() {
+		if i := Index(allocTests[1].a, allocTests[1].b); i != allocTests[1].i {
+			t.Errorf("Index([]byte(%q), []byte(%q)) = %v; want %v", allocTests[1].a, allocTests[1].b, i, allocTests[1].i)
+		}
+		if i := LastIndex(allocTests[0].a, allocTests[0].b); i != allocTests[0].i {
+			t.Errorf("LastIndex([]byte(%q), []byte(%q)) = %v; want %v", allocTests[0].a, allocTests[0].b, i, allocTests[0].i)
+		}
+	})
+	if allocs != 0 {
+		t.Errorf("expected no allocations, got %f", allocs)
+	}
 }
 
 func runIndexAnyTests(t *testing.T, f func(s []byte, chars string) int, funcName string, testCases []BinOpTest) {
diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go
index 9ecd8eb004..4c90cd3671 100644
--- a/src/internal/bytealg/bytealg.go
+++ b/src/internal/bytealg/bytealg.go
@@ -21,3 +21,128 @@ const (
 
 // MaxLen is the maximum length of the string to be searched for (argument b) in Index.
 var MaxLen int
+
+// FIXME: the logic of HashStrBytes, HashStrRevBytes, IndexRabinKarpBytes and HashStr, HashStrRev,
+// IndexRabinKarp are exactly the same, except that the types are different. Can we eliminate
+// three of them without causing allocation?
+
+// PrimeRK is the prime base used in Rabin-Karp algorithm.
+const PrimeRK = 16777619
+
+// HashStrBytes returns the hash and the appropriate multiplicative
+// factor for use in Rabin-Karp algorithm.
+func HashStrBytes(sep []byte) (uint32, uint32) {
+	hash := uint32(0)
+	for i := 0; i < len(sep); i++ {
+		hash = hash*PrimeRK + uint32(sep[i])
+	}
+	var pow, sq uint32 = 1, PrimeRK
+	for i := len(sep); i > 0; i >>= 1 {
+		if i&1 != 0 {
+			pow *= sq
+		}
+		sq *= sq
+	}
+	return hash, pow
+}
+
+// HashStr returns the hash and the appropriate multiplicative
+// factor for use in Rabin-Karp algorithm.
+func HashStr(sep string) (uint32, uint32) {
+	hash := uint32(0)
+	for i := 0; i < len(sep); i++ {
+		hash = hash*PrimeRK + uint32(sep[i])
+	}
+	var pow, sq uint32 = 1, PrimeRK
+	for i := len(sep); i > 0; i >>= 1 {
+		if i&1 != 0 {
+			pow *= sq
+		}
+		sq *= sq
+	}
+	return hash, pow
+}
+
+// HashStrRevBytes returns the hash of the reverse of sep and the
+// appropriate multiplicative factor for use in Rabin-Karp algorithm.
+func HashStrRevBytes(sep []byte) (uint32, uint32) {
+	hash := uint32(0)
+	for i := len(sep) - 1; i >= 0; i-- {
+		hash = hash*PrimeRK + uint32(sep[i])
+	}
+	var pow, sq uint32 = 1, PrimeRK
+	for i := len(sep); i > 0; i >>= 1 {
+		if i&1 != 0 {
+			pow *= sq
+		}
+		sq *= sq
+	}
+	return hash, pow
+}
+
+// HashStrRev returns the hash of the reverse of sep and the
+// appropriate multiplicative factor for use in Rabin-Karp algorithm.
+func HashStrRev(sep string) (uint32, uint32) {
+	hash := uint32(0)
+	for i := len(sep) - 1; i >= 0; i-- {
+		hash = hash*PrimeRK + uint32(sep[i])
+	}
+	var pow, sq uint32 = 1, PrimeRK
+	for i := len(sep); i > 0; i >>= 1 {
+		if i&1 != 0 {
+			pow *= sq
+		}
+		sq *= sq
+	}
+	return hash, pow
+}
+
+// IndexRabinKarpBytes uses the Rabin-Karp search algorithm to return the index of the
+// first occurrence of sep in s, or -1 if not present.
+func IndexRabinKarpBytes(s, sep []byte) int {
+	// Rabin-Karp search
+	hashsep, pow := HashStrBytes(sep)
+	n := len(sep)
+	var h uint32
+	for i := 0; i < n; i++ {
+		h = h*PrimeRK + uint32(s[i])
+	}
+	if h == hashsep && Equal(s[:n], sep) {
+		return 0
+	}
+	for i := n; i < len(s); {
+		h *= PrimeRK
+		h += uint32(s[i])
+		h -= pow * uint32(s[i-n])
+		i++
+		if h == hashsep && Equal(s[i-n:i], sep) {
+			return i - n
+		}
+	}
+	return -1
+}
+
+// IndexRabinKarp uses the Rabin-Karp search algorithm to return the index of the
+// first occurrence of substr in s, or -1 if not present.
+func IndexRabinKarp(s, substr string) int {
+	// Rabin-Karp search
+	hashss, pow := HashStr(substr)
+	n := len(substr)
+	var h uint32
+	for i := 0; i < n; i++ {
+		h = h*PrimeRK + uint32(s[i])
+	}
+	if h == hashss && s[:n] == substr {
+		return 0
+	}
+	for i := n; i < len(s); {
+		h *= PrimeRK
+		h += uint32(s[i])
+		h -= pow * uint32(s[i-n])
+		i++
+		if h == hashss && s[i-n:i] == substr {
+			return i - n
+		}
+	}
+	return -1
+}
diff --git a/src/strings/strings.go b/src/strings/strings.go
index 238d657f61..7fb05b7d0e 100644
--- a/src/strings/strings.go
+++ b/src/strings/strings.go
@@ -36,43 +36,6 @@ func explode(s string, n int) []string {
 	return a
 }
 
-// primeRK is the prime base used in Rabin-Karp algorithm.
-const primeRK = 16777619
-
-// hashStr returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func hashStr(sep string) (uint32, uint32) {
-	hash := uint32(0)
-	for i := 0; i < len(sep); i++ {
-		hash = hash*primeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, primeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
-
-// hashStrRev returns the hash of the reverse of sep and the
-// appropriate multiplicative factor for use in Rabin-Karp algorithm.
-func hashStrRev(sep string) (uint32, uint32) {
-	hash := uint32(0)
-	for i := len(sep) - 1; i >= 0; i-- {
-		hash = hash*primeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, primeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
-
 // Count counts the number of non-overlapping instances of substr in s.
 // If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
 func Count(s, substr string) int {
@@ -126,17 +89,17 @@ func LastIndex(s, substr string) int {
 		return -1
 	}
 	// Rabin-Karp search from the end of the string
-	hashss, pow := hashStrRev(substr)
+	hashss, pow := bytealg.HashStrRev(substr)
 	last := len(s) - n
 	var h uint32
 	for i := len(s) - 1; i >= last; i-- {
-		h = h*primeRK + uint32(s[i])
+		h = h*bytealg.PrimeRK + uint32(s[i])
 	}
 	if h == hashss && s[last:] == substr {
 		return last
 	}
 	for i := last - 1; i >= 0; i-- {
-		h *= primeRK
+		h *= bytealg.PrimeRK
 		h += uint32(s[i])
 		h -= pow * uint32(s[i+n])
 		if h == hashss && s[i:i+n] == substr {
@@ -1095,7 +1058,7 @@ func Index(s, substr string) int {
 		fails++
 		if fails >= 4+i>>4 && i < t {
 			// See comment in ../bytes/bytes.go.
-			j := indexRabinKarp(s[i:], substr)
+			j := bytealg.IndexRabinKarp(s[i:], substr)
 			if j < 0 {
 				return -1
 			}
@@ -1104,26 +1067,3 @@ func Index(s, substr string) int {
 	}
 	return -1
 }
-
-func indexRabinKarp(s, substr string) int {
-	// Rabin-Karp search
-	hashss, pow := hashStr(substr)
-	n := len(substr)
-	var h uint32
-	for i := 0; i < n; i++ {
-		h = h*primeRK + uint32(s[i])
-	}
-	if h == hashss && s[:n] == substr {
-		return 0
-	}
-	for i := n; i < len(s); {
-		h *= primeRK
-		h += uint32(s[i])
-		h -= pow * uint32(s[i-n])
-		i++
-		if h == hashss && s[i-n:i] == substr {
-			return i - n
-		}
-	}
-	return -1
-}

From 588ee7987d7f6be605166872ff8c478aa125bc58 Mon Sep 17 00:00:00 2001
From: Cherry Zhang 
Date: Wed, 4 Mar 2020 11:14:53 -0500
Subject: [PATCH 55/69] runtime: don't save/restore FP registers in softfloat
 mode on MIPS(64)

Fixes #37653.

Change-Id: I6188e44b4bc4aba7b56f29d9ce9de4618c70fd7b
Reviewed-on: https://go-review.googlesource.com/c/go/+/222057
Run-TryBot: Cherry Zhang 
Reviewed-by: Keith Randall 
TryBot-Result: Gobot Gobot 
---
 src/runtime/mkpreempt.go      | 24 +++++++++++++++++-------
 src/runtime/preempt_mips64x.s |  4 ++++
 src/runtime/preempt_mipsx.s   |  4 ++++
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index c26406e55f..35ed42871f 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -382,6 +382,7 @@ func genMIPS(_64bit bool) {
 	sub := "SUB"
 	r28 := "R28"
 	regsize := 4
+	softfloat := "GOMIPS_softfloat"
 	if _64bit {
 		mov = "MOVV"
 		movf = "MOVD"
@@ -389,6 +390,7 @@ func genMIPS(_64bit bool) {
 		sub = "SUBV"
 		r28 = "RSB"
 		regsize = 8
+		softfloat = "GOMIPS64_softfloat"
 	}
 
 	// Add integer registers R1-R22, R24-R25, R28
@@ -411,28 +413,36 @@ func genMIPS(_64bit bool) {
 		mov+" LO, R1\n"+mov+" R1, %d(R29)",
 		mov+" %d(R29), R1\n"+mov+" R1, LO",
 		regsize)
+
 	// Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant)
-	l.addSpecial(
+	var lfp = layout{sp: "R29", stack: l.stack}
+	lfp.addSpecial(
 		mov+" FCR31, R1\n"+mov+" R1, %d(R29)",
 		mov+" %d(R29), R1\n"+mov+" R1, FCR31",
 		regsize)
 	// Add floating point registers F0-F31.
 	for i := 0; i <= 31; i++ {
 		reg := fmt.Sprintf("F%d", i)
-		l.add(movf, reg, regsize)
+		lfp.add(movf, reg, regsize)
 	}
 
 	// allocate frame, save PC of interrupted instruction (in LR)
-	p(mov+" R31, -%d(R29)", l.stack)
-	p(sub+" $%d, R29", l.stack)
+	p(mov+" R31, -%d(R29)", lfp.stack)
+	p(sub+" $%d, R29", lfp.stack)
 
 	l.save()
+	p("#ifndef %s", softfloat)
+	lfp.save()
+	p("#endif")
 	p("CALL ·asyncPreempt2(SB)")
+	p("#ifndef %s", softfloat)
+	lfp.restore()
+	p("#endif")
 	l.restore()
 
-	p(mov+" %d(R29), R31", l.stack)     // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
-	p(mov + " (R29), R23")              // load PC to REGTMP
-	p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
+	p(mov+" %d(R29), R31", lfp.stack)     // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
+	p(mov + " (R29), R23")                // load PC to REGTMP
+	p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
 	p("JMP (R23)")
 }
 
diff --git a/src/runtime/preempt_mips64x.s b/src/runtime/preempt_mips64x.s
index 8048a87cd3..1e123e8077 100644
--- a/src/runtime/preempt_mips64x.s
+++ b/src/runtime/preempt_mips64x.s
@@ -37,6 +37,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVV R1, 208(R29)
 	MOVV LO, R1
 	MOVV R1, 216(R29)
+	#ifndef GOMIPS64_softfloat
 	MOVV FCR31, R1
 	MOVV R1, 224(R29)
 	MOVD F0, 232(R29)
@@ -71,7 +72,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVD F29, 464(R29)
 	MOVD F30, 472(R29)
 	MOVD F31, 480(R29)
+	#endif
 	CALL ·asyncPreempt2(SB)
+	#ifndef GOMIPS64_softfloat
 	MOVD 480(R29), F31
 	MOVD 472(R29), F30
 	MOVD 464(R29), F29
@@ -106,6 +109,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVD 232(R29), F0
 	MOVV 224(R29), R1
 	MOVV R1, FCR31
+	#endif
 	MOVV 216(R29), R1
 	MOVV R1, LO
 	MOVV 208(R29), R1
diff --git a/src/runtime/preempt_mipsx.s b/src/runtime/preempt_mipsx.s
index 840e861497..afac33e0a0 100644
--- a/src/runtime/preempt_mipsx.s
+++ b/src/runtime/preempt_mipsx.s
@@ -37,6 +37,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVW R1, 104(R29)
 	MOVW LO, R1
 	MOVW R1, 108(R29)
+	#ifndef GOMIPS_softfloat
 	MOVW FCR31, R1
 	MOVW R1, 112(R29)
 	MOVF F0, 116(R29)
@@ -71,7 +72,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVF F29, 232(R29)
 	MOVF F30, 236(R29)
 	MOVF F31, 240(R29)
+	#endif
 	CALL ·asyncPreempt2(SB)
+	#ifndef GOMIPS_softfloat
 	MOVF 240(R29), F31
 	MOVF 236(R29), F30
 	MOVF 232(R29), F29
@@ -106,6 +109,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVF 116(R29), F0
 	MOVW 112(R29), R1
 	MOVW R1, FCR31
+	#endif
 	MOVW 108(R29), R1
 	MOVW R1, LO
 	MOVW 104(R29), R1

From 68fea523fda227ca5fe7a1eadb7542be4b0a840c Mon Sep 17 00:00:00 2001
From: Meng Zhuo 
Date: Tue, 3 Mar 2020 23:05:32 +0800
Subject: [PATCH 56/69] cmd/asm: add MIPS MSA LD/ST/LDI support for mips64x

This CL adds primitive asm support of MIPS MSA by introducing
new sets of register W0-W31 (C_WREG) and 12 new instructions:

* VMOV{B,H,W,D} ADDCONST, WREG  (Vector load immediate)
* VMOV{B,H,W,D} SOREG, WREG     (Vector load)
* VMOV{B,H,W,D} WREG, SOREG     (Vector store)

Ref: MIPS Architecture for Programmers Volume IV-j: The MIPS64 SIMD Architecture Module

Change-Id: I3362c59a73c82c94769c18a19a0bee7e5029217d
Reviewed-on: https://go-review.googlesource.com/c/go/+/215723
Run-TryBot: Meng Zhuo 
TryBot-Result: Gobot Gobot 
Reviewed-by: Cherry Zhang 
---
 src/cmd/asm/internal/arch/arch.go          |  4 ++
 src/cmd/asm/internal/arch/mips.go          |  4 ++
 src/cmd/asm/internal/asm/testdata/mips64.s | 33 ++++++++++
 src/cmd/internal/obj/mips/a.out.go         | 48 ++++++++++++++
 src/cmd/internal/obj/mips/anames.go        |  4 ++
 src/cmd/internal/obj/mips/anames0.go       |  1 +
 src/cmd/internal/obj/mips/asm0.go          | 74 ++++++++++++++++++++++
 src/cmd/internal/obj/mips/list0.go         |  3 +
 8 files changed, 171 insertions(+)

diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go
index f090d12bed..d9ba6670e8 100644
--- a/src/cmd/asm/internal/arch/arch.go
+++ b/src/cmd/asm/internal/arch/arch.go
@@ -484,6 +484,9 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
 	for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
 		register[obj.Rconv(i)] = int16(i)
 	}
+	for i := mips.REG_W0; i <= mips.REG_W31; i++ {
+		register[obj.Rconv(i)] = int16(i)
+	}
 	register["HI"] = mips.REG_HI
 	register["LO"] = mips.REG_LO
 	// Pseudo-registers.
@@ -501,6 +504,7 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
 		"FCR": true,
 		"M":   true,
 		"R":   true,
+		"W":   true,
 	}
 
 	instructions := make(map[string]obj.As)
diff --git a/src/cmd/asm/internal/arch/mips.go b/src/cmd/asm/internal/arch/mips.go
index 79fb7cf02e..5d71f40fbe 100644
--- a/src/cmd/asm/internal/arch/mips.go
+++ b/src/cmd/asm/internal/arch/mips.go
@@ -63,6 +63,10 @@ func mipsRegisterNumber(name string, n int16) (int16, bool) {
 		if 0 <= n && n <= 31 {
 			return mips.REG_R0 + n, true
 		}
+	case "W":
+		if 0 <= n && n <= 31 {
+			return mips.REG_W0 + n, true
+		}
 	}
 	return 0, false
 }
diff --git a/src/cmd/asm/internal/asm/testdata/mips64.s b/src/cmd/asm/internal/asm/testdata/mips64.s
index 2a8c288d7b..21ab82f319 100644
--- a/src/cmd/asm/internal/asm/testdata/mips64.s
+++ b/src/cmd/asm/internal/asm/testdata/mips64.s
@@ -583,6 +583,39 @@ label4:
 	NEGV	R1, R2 // 0001102f
 	RET
 
+// MSA VMOVI
+	VMOVB	$511, W0   // 7b0ff807
+	VMOVH	$24, W23   // 7b20c5c7
+	VMOVW	$-24, W15  // 7b5f43c7
+	VMOVD	$-511, W31 // 7b700fc7
+
+	VMOVB	(R0), W8       // 78000220
+	VMOVB	511(R3), W0    // 79ff1820
+	VMOVB	-512(R12), W21 // 7a006560
+	VMOVH	(R24), W12     // 7800c321
+	VMOVH	110(R19), W8   // 78379a21
+	VMOVH	-70(R12), W3   // 7bdd60e1
+	VMOVW	(R3), W31      // 78001fe2
+	VMOVW	64(R20), W16   // 7810a422
+	VMOVW	-104(R17), W24 // 7be68e22
+	VMOVD	(R3), W2       // 780018a3
+	VMOVD	128(R23), W19  // 7810bce3
+	VMOVD	-256(R31), W0  // 7be0f823
+
+	VMOVB	W8, (R0)       // 78000224
+	VMOVB	W0, 511(R3)    // 79ff1824
+	VMOVB	W21, -512(R12) // 7a006564
+	VMOVH	W12, (R24)     // 7800c325
+	VMOVH	W8, 110(R19)   // 78379a25
+	VMOVH	W3, -70(R12)   // 7bdd60e5
+	VMOVW	W31, (R3)      // 78001fe6
+	VMOVW	W16, 64(R20)   // 7810a426
+	VMOVW	W24, -104(R17) // 7be68e26
+	VMOVD	W2, (R3)       // 780018a7
+	VMOVD	W19, 128(R23)  // 7810bce7
+	VMOVD	W0, -256(R31)  // 7be0f827
+	RET
+
 // END
 //
 //	LEND	comma // asm doesn't support the trailing comma.
diff --git a/src/cmd/internal/obj/mips/a.out.go b/src/cmd/internal/obj/mips/a.out.go
index b0205ec11a..ddd048a17f 100644
--- a/src/cmd/internal/obj/mips/a.out.go
+++ b/src/cmd/internal/obj/mips/a.out.go
@@ -43,6 +43,7 @@ const (
 	NSYM   = 50
 	NREG   = 32 /* number of general registers */
 	NFREG  = 32 /* number of floating point registers */
+	NWREG  = 32 /* number of MSA registers */
 )
 
 const (
@@ -180,6 +181,41 @@ const (
 	REG_FCR30
 	REG_FCR31
 
+	// MSA registers
+	// The lower bits of W registers alias the F registers
+	REG_W0 // must be a multiple of 32
+	REG_W1
+	REG_W2
+	REG_W3
+	REG_W4
+	REG_W5
+	REG_W6
+	REG_W7
+	REG_W8
+	REG_W9
+	REG_W10
+	REG_W11
+	REG_W12
+	REG_W13
+	REG_W14
+	REG_W15
+	REG_W16
+	REG_W17
+	REG_W18
+	REG_W19
+	REG_W20
+	REG_W21
+	REG_W22
+	REG_W23
+	REG_W24
+	REG_W25
+	REG_W26
+	REG_W27
+	REG_W28
+	REG_W29
+	REG_W30
+	REG_W31
+
 	REG_HI
 	REG_LO
 
@@ -217,6 +253,8 @@ func init() {
 	f(REG_F0, REG_F31, 32) // For 32-bit MIPS, compiler only uses even numbered registers --  see cmd/compile/internal/ssa/gen/MIPSOps.go
 	MIPSDWARFRegisters[REG_HI] = 64
 	MIPSDWARFRegisters[REG_LO] = 65
+	// The lower bits of W registers alias the F registers
+	f(REG_W0, REG_W31, 32)
 }
 
 const (
@@ -243,6 +281,7 @@ const (
 	C_FREG
 	C_FCREG
 	C_MREG /* special processor register */
+	C_WREG /* MSA registers */
 	C_HI
 	C_LO
 	C_ZCON
@@ -405,6 +444,12 @@ const (
 	AMOVVF
 	AMOVVD
 
+	/* MSA */
+	AVMOVB
+	AVMOVH
+	AVMOVW
+	AVMOVD
+
 	ALAST
 
 	// aliases
@@ -430,4 +475,7 @@ func init() {
 	if REG_FCR0%32 != 0 {
 		panic("REG_FCR0 is not a multiple of 32")
 	}
+	if REG_W0%32 != 0 {
+		panic("REG_W0 is not a multiple of 32")
+	}
 }
diff --git a/src/cmd/internal/obj/mips/anames.go b/src/cmd/internal/obj/mips/anames.go
index 9a2e4f5703..2a44e4ca70 100644
--- a/src/cmd/internal/obj/mips/anames.go
+++ b/src/cmd/internal/obj/mips/anames.go
@@ -127,5 +127,9 @@ var Anames = []string{
 	"MOVDV",
 	"MOVVF",
 	"MOVVD",
+	"VMOVB",
+	"VMOVH",
+	"VMOVW",
+	"VMOVD",
 	"LAST",
 }
diff --git a/src/cmd/internal/obj/mips/anames0.go b/src/cmd/internal/obj/mips/anames0.go
index c56d34eaf5..c300696730 100644
--- a/src/cmd/internal/obj/mips/anames0.go
+++ b/src/cmd/internal/obj/mips/anames0.go
@@ -10,6 +10,7 @@ var cnames0 = []string{
 	"FREG",
 	"FCREG",
 	"MREG",
+	"WREG",
 	"HI",
 	"LO",
 	"ZCON",
diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go
index 934f88a0b1..c19541522f 100644
--- a/src/cmd/internal/obj/mips/asm0.go
+++ b/src/cmd/internal/obj/mips/asm0.go
@@ -377,6 +377,11 @@ var optab = []Optab{
 	{ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0, 0},
 	{ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0},
 
+	{AVMOVB, C_SCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0},
+	{AVMOVB, C_ADDCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0},
+	{AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0},
+	{AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0},
+
 	{ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */
 	{ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
 	{ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
@@ -556,6 +561,9 @@ func (c *ctxt0) aclass(a *obj.Addr) int {
 		if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 {
 			return C_FCREG
 		}
+		if REG_W0 <= a.Reg && a.Reg <= REG_W31 {
+			return C_WREG
+		}
 		if a.Reg == REG_LO {
 			return C_LO
 		}
@@ -1029,6 +1037,11 @@ func buildop(ctxt *obj.Link) {
 		case AMOVVL:
 			opset(AMOVVR, r0)
 
+		case AVMOVB:
+			opset(AVMOVH, r0)
+			opset(AVMOVW, r0)
+			opset(AVMOVD, r0)
+
 		case AMOVW,
 			AMOVD,
 			AMOVF,
@@ -1121,6 +1134,14 @@ func OP_JMP(op uint32, i uint32) uint32 {
 	return op | i&0x3FFFFFF
 }
 
+func OP_VI10(op uint32, df uint32, s10 int32, wd uint32, minor uint32) uint32 {
+	return 0x1e<<26 | (op&7)<<23 | (df&3)<<21 | uint32(s10&0x3FF)<<11 | (wd&31)<<6 | minor&0x3F
+}
+
+func OP_VMI10(s10 int32, rs uint32, wd uint32, minor uint32, df uint32) uint32 {
+	return 0x1e<<26 | uint32(s10&0x3FF)<<16 | (rs&31)<<11 | (wd&31)<<6 | (minor&15)<<2 | df&3
+}
+
 func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
 	o1 := uint32(0)
 	o2 := uint32(0)
@@ -1629,6 +1650,19 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
 		rel.Sym = p.From.Sym
 		rel.Add = p.From.Offset
 		rel.Type = objabi.R_ADDRMIPSTLS
+
+	case 56: /* vmov{b,h,w,d} $scon, wr */
+
+		v := c.regoff(&p.From)
+		o1 = OP_VI10(110, c.twobitdf(p.As), v, uint32(p.To.Reg), 7)
+
+	case 57: /* vld $soreg, wr */
+		v := c.lsoffset(p.As, c.regoff(&p.From))
+		o1 = OP_VMI10(v, uint32(p.From.Reg), uint32(p.To.Reg), 8, c.twobitdf(p.As))
+
+	case 58: /* vst wr, $soreg */
+		v := c.lsoffset(p.As, c.regoff(&p.To))
+		o1 = OP_VMI10(v, uint32(p.To.Reg), uint32(p.From.Reg), 9, c.twobitdf(p.As))
 	}
 
 	out[0] = o1
@@ -2009,3 +2043,43 @@ func vshift(a obj.As) bool {
 	}
 	return false
 }
+
+// MSA Two-bit Data Format Field Encoding
+func (c *ctxt0) twobitdf(a obj.As) uint32 {
+	switch a {
+	case AVMOVB:
+		return 0
+	case AVMOVH:
+		return 1
+	case AVMOVW:
+		return 2
+	case AVMOVD:
+		return 3
+	default:
+		c.ctxt.Diag("unsupported data format %v", a)
+	}
+	return 0
+}
+
+// MSA load/store offsets must be a multiple of the size of the data format
+func (c *ctxt0) lsoffset(a obj.As, o int32) int32 {
+	var mod int32
+	switch a {
+	case AVMOVB:
+		mod = 1
+	case AVMOVH:
+		mod = 2
+	case AVMOVW:
+		mod = 4
+	case AVMOVD:
+		mod = 8
+	default:
+		c.ctxt.Diag("unsupported instruction:%v", a)
+	}
+
+	if o%mod != 0 {
+		c.ctxt.Diag("invalid offset for %v: %d is not a multiple of %d", a, o, mod)
+	}
+
+	return o / mod
+}
diff --git a/src/cmd/internal/obj/mips/list0.go b/src/cmd/internal/obj/mips/list0.go
index addf9f70d8..f734e21ede 100644
--- a/src/cmd/internal/obj/mips/list0.go
+++ b/src/cmd/internal/obj/mips/list0.go
@@ -59,6 +59,9 @@ func rconv(r int) string {
 	if REG_FCR0 <= r && r <= REG_FCR31 {
 		return fmt.Sprintf("FCR%d", r-REG_FCR0)
 	}
+	if REG_W0 <= r && r <= REG_W31 {
+		return fmt.Sprintf("W%d", r-REG_W0)
+	}
 	if r == REG_HI {
 		return "HI"
 	}

From c55a50edb9454dbdaca165be4b030a1e0cfbaa19 Mon Sep 17 00:00:00 2001
From: "Bryan C. Mills" 
Date: Fri, 13 Dec 2019 15:42:24 -0500
Subject: [PATCH 57/69] cmd/go: invalidate cached test results when the
 -timeout flag changes

Fixes #36134

Change-Id: Icc5e1269696db778ba5c1e6bebed9969b8841c81
Reviewed-on: https://go-review.googlesource.com/c/go/+/220365
Run-TryBot: Bryan C. Mills 
TryBot-Result: Gobot Gobot 
Reviewed-by: Jay Conrod 
Reviewed-by: Michael Matloob 
---
 doc/go1.15.html                               |  8 +++++++
 src/cmd/go/go_test.go                         | 24 -------------------
 src/cmd/go/internal/test/test.go              |  5 +---
 .../go/testdata/script/test_cache_inputs.txt  | 21 ++++++++++++++++
 4 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/doc/go1.15.html b/doc/go1.15.html
index 9cc576e4be..b4319874c9 100644
--- a/doc/go1.15.html
+++ b/doc/go1.15.html
@@ -47,6 +47,14 @@ TODO
 TODO
 

+

go test

+ +

+ Changing the -timeout flag now invalidates cached test results. A + cached result for a test run with a long timeout will no longer count as + passing when go test is re-invoked with a short one. +

+

Flag parsing

diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 6654bd3143..a5b0f0898b 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -2431,30 +2431,6 @@ func TestTestCache(t *testing.T) { tg.setenv("GOPATH", tg.tempdir) tg.setenv("GOCACHE", tg.path("cache")) - if runtime.Compiler != "gccgo" { - // timeout here should not affect result being cached - // or being retrieved later. - tg.run("test", "-x", "-timeout=10s", "errors") - tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler") - tg.grepStderr(`[\\/]link|gccgo`, "did not run linker") - tg.grepStderr(`errors\.test`, "did not run test") - - tg.run("test", "-x", "errors") - tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result") - tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler") - tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker") - tg.grepStderrNot(`errors\.test`, "incorrectly ran test") - tg.grepStderrNot("DO NOT USE", "poisoned action status leaked") - - // Even very low timeouts do not disqualify cached entries. - tg.run("test", "-timeout=1ns", "-x", "errors") - tg.grepStderrNot(`errors\.test`, "incorrectly ran test") - - tg.run("clean", "-testcache") - tg.run("test", "-x", "errors") - tg.grepStderr(`errors\.test`, "did not run test") - } - // The -p=1 in the commands below just makes the -x output easier to read. t.Log("\n\nINITIAL\n\n") diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 600f76df4c..1c6fb0b97f 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -1291,16 +1291,13 @@ func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bo "-test.parallel", "-test.run", "-test.short", + "-test.timeout", "-test.v": // These are cacheable. // Note that this list is documented above, // so if you add to this list, update the docs too. 
cacheArgs = append(cacheArgs, arg) - case "-test.timeout": - // Special case: this is cacheable but ignored during the hash. - // Do not add to cacheArgs. - default: // nothing else is cacheable if cache.DebugTest { diff --git a/src/cmd/go/testdata/script/test_cache_inputs.txt b/src/cmd/go/testdata/script/test_cache_inputs.txt index 46faca0f42..57602e91dc 100644 --- a/src/cmd/go/testdata/script/test_cache_inputs.txt +++ b/src/cmd/go/testdata/script/test_cache_inputs.txt @@ -29,6 +29,23 @@ go test testcache -run=TestLookupEnv go test testcache -run=TestLookupEnv stdout '\(cached\)' +# Changes in arguments forwarded to the test should invalidate cached test +# results. +go test testcache -run=TestOSArgs -v hello +! stdout '\(cached\)' +stdout 'hello' +go test testcache -run=TestOSArgs -v goodbye +! stdout '\(cached\)' +stdout 'goodbye' + +# golang.org/issue/36134: that includes the `-timeout` argument. +go test testcache -run=TestOSArgs -timeout=20m -v +! stdout '\(cached\)' +stdout '-test\.timeout[= ]20m' +go test testcache -run=TestOSArgs -timeout=5s -v +! stdout '\(cached\)' +stdout '-test\.timeout[= ]5s' + # If the test stats a file, changes to the file should invalidate the cache. go test testcache -run=FileSize go test testcache -run=FileSize @@ -207,6 +224,10 @@ func TestExternalFile(t *testing.T) { t.Fatal(err) } } + +func TestOSArgs(t *testing.T) { + t.Log(os.Args) +} -- mkold.go -- package main From 19ed0d993cf7b0df804c4c2e96dc674da4059e03 Mon Sep 17 00:00:00 2001 From: Diogo Pinela Date: Tue, 3 Mar 2020 21:03:40 +0000 Subject: [PATCH 58/69] cmd/compile: use staticuint64s instead of staticbytes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are still two places in src/runtime/string.go that use staticbytes, so we cannot delete it just yet. There is a new codegen test to verify that the index calculation is constant-folded, at least on amd64. ppc64, mips[64] and s390x cannot currently do that. 
There is also a new runtime benchmark to ensure that this does not slow down performance (tested against parent commit): name old time/op new time/op delta ConvT2EByteSized/bool-4 1.07ns ± 1% 1.07ns ± 1% ~ (p=0.060 n=14+15) ConvT2EByteSized/uint8-4 1.06ns ± 1% 1.07ns ± 1% ~ (p=0.095 n=14+15) Updates #37612 Change-Id: I5ec30738edaa48cda78dfab4a78e24a32fa7fd6a Reviewed-on: https://go-review.googlesource.com/c/go/+/221957 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/go.go | 2 +- src/cmd/compile/internal/gc/walk.go | 37 ++++++++++++++++++++--------- src/runtime/iface_test.go | 14 +++++++++++ test/codegen/smallintiface.go | 22 +++++++++++++++++ 4 files changed, 63 insertions(+), 12 deletions(-) create mode 100644 test/codegen/smallintiface.go diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 50b866ca65..85c857c214 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -279,7 +279,7 @@ type Arch struct { var thearch Arch var ( - staticbytes, + staticuint64s, zerobase *Node assertE2I, diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index d468f241f9..14af03f58c 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -837,10 +837,12 @@ opswitch: break } - if staticbytes == nil { - staticbytes = newname(Runtimepkg.Lookup("staticbytes")) - staticbytes.SetClass(PEXTERN) - staticbytes.Type = types.NewArray(types.Types[TUINT8], 256) + if staticuint64s == nil { + staticuint64s = newname(Runtimepkg.Lookup("staticuint64s")) + staticuint64s.SetClass(PEXTERN) + // The actual type is [256]uint64, but we use [256*8]uint8 so we can address + // individual bytes. 
+ staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8) zerobase = newname(Runtimepkg.Lookup("zerobase")) zerobase.SetClass(PEXTERN) zerobase.Type = types.Types[TUINTPTR] @@ -856,9 +858,16 @@ opswitch: cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246. value = zerobase case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): - // n.Left is a bool/byte. Use staticbytes[n.Left]. + // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian + // and staticuint64s[n.Left * 8 + 7] on big-endian. n.Left = cheapexpr(n.Left, init) - value = nod(OINDEX, staticbytes, byteindex(n.Left)) + // byteindex widens n.Left so that the multiplication doesn't overflow. + index := nod(OLSH, byteindex(n.Left), nodintconst(3)) + index.SetBounded(true) + if thearch.LinkArch.ByteOrder == binary.BigEndian { + index = nod(OADD, index, nodintconst(7)) + } + value = nod(OINDEX, staticuint64s, index) value.SetBounded(true) case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): // n.Left is a readonly global; use it directly. @@ -2423,15 +2432,21 @@ func convnop(n *Node, t *types.Type) *Node { return n } -// byteindex converts n, which is byte-sized, to a uint8. -// We cannot use conv, because we allow converting bool to uint8 here, +// byteindex converts n, which is byte-sized, to an int used to index into an array. +// We cannot use conv, because we allow converting bool to int here, // which is forbidden in user code. func byteindex(n *Node) *Node { - if types.Identical(n.Type, types.Types[TUINT8]) { - return n + // We cannot convert from bool to int directly. + // While converting from int8 to int is possible, it would yield + // the wrong result for negative values. + // Reinterpreting the value as an unsigned byte solves both cases. 
+ if !types.Identical(n.Type, types.Types[TUINT8]) { + n = nod(OCONV, n, nil) + n.Type = types.Types[TUINT8] + n.SetTypecheck(1) } n = nod(OCONV, n, nil) - n.Type = types.Types[TUINT8] + n.Type = types.Types[TINT] n.SetTypecheck(1) return n } diff --git a/src/runtime/iface_test.go b/src/runtime/iface_test.go index 73beebffe2..4fab6c968a 100644 --- a/src/runtime/iface_test.go +++ b/src/runtime/iface_test.go @@ -95,6 +95,19 @@ func BenchmarkNeIfaceConcrete(b *testing.B) { } } +func BenchmarkConvT2EByteSized(b *testing.B) { + b.Run("bool", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = yes + } + }) + b.Run("uint8", func(b *testing.B) { + for i := 0; i < b.N; i++ { + e = eight8 + } + }) +} + func BenchmarkConvT2ESmall(b *testing.B) { for i := 0; i < b.N; i++ { e = ts @@ -310,6 +323,7 @@ func TestZeroConvT2x(t *testing.T) { var ( eight8 uint8 = 8 eight8I T8 = 8 + yes bool = true zero16 uint16 = 0 zero16I T16 = 0 diff --git a/test/codegen/smallintiface.go b/test/codegen/smallintiface.go new file mode 100644 index 0000000000..0207a0af79 --- /dev/null +++ b/test/codegen/smallintiface.go @@ -0,0 +1,22 @@ +// asmcheck + +package codegen + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +func booliface() interface{} { + // amd64:`LEAQ\truntime.staticuint64s\+8\(SB\)` + return true +} + +func smallint8iface() interface{} { + // amd64:`LEAQ\truntime.staticuint64s\+2024\(SB\)` + return int8(-3) +} + +func smalluint8iface() interface{} { + // amd64:`LEAQ\truntime.staticuint64s\+24\(SB\)` + return uint8(3) +} From 17e6252c051e09a3a433620182fc54952a402d73 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Mon, 10 Feb 2020 21:32:04 -0800 Subject: [PATCH 59/69] cmd/compile/internal/syntax: improved scanner tests This is one of several changes that were part of a larger rewrite which I made in early 2019 after switching to the new number literal syntax implementation. The purpose of the rewrite was to simplify reading of source code (Unicode character by character) and speed up the scanner but was never submitted for review due to other priorities. Part 1 of 3: This change contains improvements to the scanner tests. Change-Id: Iecfcaef00fdeb690b0db786edbd52e828417141b Reviewed-on: https://go-review.googlesource.com/c/go/+/221601 Reviewed-by: Matthew Dempsky --- .../compile/internal/syntax/scanner_test.go | 151 +++++++++++------- 1 file changed, 97 insertions(+), 54 deletions(-) diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index d76231a4af..612c59507e 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -12,19 +12,59 @@ import ( "testing" ) +// errh is a default error handler for basic tests. +func errh(line, col uint, msg string) { + panic(fmt.Sprintf("%d:%d: %s", line, col, msg)) +} + +// Don't bother with other tests if TestSmoke doesn't pass. 
+func TestSmoke(t *testing.T) { + const src = "if (+foo\t+=..123/***/4.2_0e-0i'a'`raw`\"string\" ;//$" + tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Semi, _EOF} + + var got scanner + got.init(strings.NewReader(src), errh, 0) + for _, want := range tokens { + got.next() + if got.tok != want { + t.Errorf("%d:%d: got %s; want %s", got.line, got.col, got.tok, want) + continue + } + } +} + +// Once TestSmoke passes, run TestTokens next. +func TestTokens(t *testing.T) { + var got scanner + for _, want := range sampleTokens { + got.init(strings.NewReader(want.src), func(line, col uint, msg string) { + t.Errorf("%s:%d:%d: %s", want.src, line, col, msg) + }, 0) + got.next() + if got.tok != want.tok { + t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok) + continue + } + if (got.tok == _Name || got.tok == _Literal) && got.lit != want.src { + t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src) + } + } +} + func TestScanner(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } - src, err := os.Open("parser.go") + filename := *src_ // can be changed via -src flag + src, err := os.Open(filename) if err != nil { t.Fatal(err) } defer src.Close() var s scanner - s.init(src, nil, 0) + s.init(src, errh, 0) for { s.next() if s.tok == _EOF { @@ -34,64 +74,66 @@ func TestScanner(t *testing.T) { continue } switch s.tok { - case _Name: - fmt.Println(s.line, s.tok, "=>", s.lit) + case _Name, _Literal: + fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit) case _Operator: - fmt.Println(s.line, s.tok, "=>", s.op, s.prec) + fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec) default: - fmt.Println(s.line, s.tok) + fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok) } } } -func TestTokens(t *testing.T) { +func TestEmbeddedTokens(t *testing.T) { // make source var buf bytes.Buffer for i, s := range sampleTokens 
{ - buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation - buf.WriteString(s.src) // token - buf.WriteString(" "[:i&7]) // trailing spaces - fmt.Fprintf(&buf, "/*line foo:%d */ // bar\n", i+linebase) // comments (don't crash w/o directive handler) + buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation + buf.WriteString(s.src) // token + buf.WriteString(" "[:i&7]) // trailing spaces + buf.WriteString(fmt.Sprintf("/*line foo:%d */ // bar\n", i)) // comments + newline (don't crash w/o directive handler) } // scan source var got scanner + var src string got.init(&buf, func(line, col uint, msg string) { - t.Fatalf("%d:%d: %s", line, col, msg) + t.Fatalf("%s:%d:%d: %s", src, line, col, msg) }, 0) got.next() for i, want := range sampleTokens { + src = want.src nlsemi := false - if got.line != uint(i+linebase) { - t.Errorf("got line %d; want %d", got.line, i+linebase) + if got.line-linebase != uint(i) { + t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i) } if got.tok != want.tok { - t.Errorf("got tok = %s; want %s", got.tok, want.tok) + t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok) continue } switch want.tok { case _Semi: if got.lit != "semicolon" { - t.Errorf("got %s; want semicolon", got.lit) + t.Errorf("%s: got %s; want semicolon", src, got.lit) } case _Name, _Literal: if got.lit != want.src { - t.Errorf("got lit = %q; want %q", got.lit, want.src) + t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src) continue } nlsemi = true case _Operator, _AssignOp, _IncOp: if got.op != want.op { - t.Errorf("got op = %s; want %s", got.op, want.op) + t.Errorf("%s: got op %s; want %s", src, got.op, want.op) continue } if got.prec != want.prec { - t.Errorf("got prec = %d; want %d", got.prec, want.prec) + t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec) continue } nlsemi = want.tok == _IncOp @@ -103,11 +145,11 @@ func TestTokens(t *testing.T) { if nlsemi { got.next() if got.tok != _Semi { - t.Errorf("got tok = %s; want ;", got.tok) 
+ t.Errorf("%s: got tok %s; want ;", src, got.tok) continue } if got.lit != "newline" { - t.Errorf("got %s; want newline", got.lit) + t.Errorf("%s: got %s; want newline", src, got.lit) } } @@ -299,7 +341,7 @@ func TestComments(t *testing.T) { {"//", comment{0, 0, "//"}}, /*-style comments */ - {"/* regular comment */", comment{0, 0, "/* regular comment */"}}, + {"123/* regular comment */", comment{0, 3, "/* regular comment */"}}, {"package p /* regular comment", comment{0, 0, ""}}, {"\n\n\n/*\n*//* want this one */", comment{4, 2, "/* want this one */"}}, {"\n\n/**/", comment{2, 0, "/**/"}}, @@ -307,17 +349,16 @@ func TestComments(t *testing.T) { } { var s scanner var got comment - s.init(strings.NewReader(test.src), - func(line, col uint, msg string) { - if msg[0] != '/' { - // error - if msg != "comment not terminated" { - t.Errorf("%q: %s", test.src, msg) - } - return + s.init(strings.NewReader(test.src), func(line, col uint, msg string) { + if msg[0] != '/' { + // error + if msg != "comment not terminated" { + t.Errorf("%q: %s", test.src, msg) } - got = comment{line - linebase, col - colbase, msg} // keep last one - }, comments) + return + } + got = comment{line - linebase, col - colbase, msg} // keep last one + }, comments) for { s.next() @@ -542,7 +583,7 @@ func TestNumbers(t *testing.T) { func TestScanErrors(t *testing.T) { for _, test := range []struct { - src, msg string + src, err string line, col uint // 0-based }{ // Note: Positions for lexical errors are the earliest position @@ -582,7 +623,7 @@ func TestScanErrors(t *testing.T) { {`'xx`, "invalid character literal (missing closing ')", 0, 0}, {`'xx'`, "invalid character literal (more than one character)", 0, 0}, - {"\"\n", "newline in string", 0, 1}, + {"\n \"foo\n", "newline in string", 1, 7}, {`"`, "string not terminated", 0, 0}, {`"foo`, "string not terminated", 0, 0}, {"`", "string not terminated", 0, 0}, @@ -607,27 +648,19 @@ func TestScanErrors(t *testing.T) { {`var s string = "\x"`, "non-hex 
character in escape sequence: \"", 0, 18}, {`return "\Uffffffff"`, "escape sequence is invalid Unicode code point U+FFFFFFFF", 0, 18}, + {"0b.0", "invalid radix point in binary literal", 0, 2}, + {"0x.p0\n", "hexadecimal literal has no digits", 0, 3}, + // former problem cases {"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0}, } { var s scanner - nerrors := 0 - s.init(strings.NewReader(test.src), func(line, col uint, msg string) { - nerrors++ - // only check the first error - if nerrors == 1 { - if msg != test.msg { - t.Errorf("%q: got msg = %q; want %q", test.src, msg, test.msg) - } - if line != test.line+linebase { - t.Errorf("%q: got line = %d; want %d", test.src, line, test.line+linebase) - } - if col != test.col+colbase { - t.Errorf("%q: got col = %d; want %d", test.src, col, test.col+colbase) - } - } else if nerrors > 1 { - // TODO(gri) make this use position info - t.Errorf("%q: got unexpected %q at line = %d", test.src, msg, line) + var line, col uint + var err string + s.init(strings.NewReader(test.src), func(l, c uint, msg string) { + if err == "" { + line, col = l-linebase, c-colbase + err = msg } }, 0) @@ -638,8 +671,18 @@ func TestScanErrors(t *testing.T) { } } - if nerrors == 0 { - t.Errorf("%q: got no error; want %q", test.src, test.msg) + if err != "" { + if err != test.err { + t.Errorf("%q: got err = %q; want %q", test.src, err, test.err) + } + if line != test.line { + t.Errorf("%q: got line = %d; want %d", test.src, line, test.line) + } + if col != test.col { + t.Errorf("%q: got col = %d; want %d", test.src, col, test.col) + } + } else { + t.Errorf("%q: got no error; want %q", test.src, test.err) } } } @@ -648,7 +691,7 @@ func TestIssue21938(t *testing.T) { s := "/*" + strings.Repeat(" ", 4089) + "*/ .5" var got scanner - got.init(strings.NewReader(s), nil, 0) + got.init(strings.NewReader(s), errh, 0) got.next() if got.tok != _Literal || got.lit != ".5" { From bfb903f2521ff24639aa7a5219330df38b06f412 Mon Sep 17 00:00:00 2001 From: Robert 
Griesemer Date: Mon, 10 Feb 2020 22:02:47 -0800 Subject: [PATCH 60/69] cmd/compile/internal/syntax: better scanner error messages This is one of several changes that were part of a larger rewrite which I made in early 2019 after switching to the new number literal syntax implementation. The purpose of the rewrite was to simplify reading of source code (Unicode character by character) and speed up the scanner but was never submitted for review due to other priorities. Part 2 of 3: This change contains improvements to the scanner error messages: - Use "rune literal" rather than "character literal" to match the spec nomenclature. - Shorter, more to the point error messages. (For instance, "more than one character in rune literal" rather than "invalid character literal (more than one character)", etc.) Change-Id: I1aaf79003374a68dbb05926437ed305cf2a8ec96 Reviewed-on: https://go-review.googlesource.com/c/go/+/221602 Run-TryBot: Robert Griesemer TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/syntax/scanner.go | 18 +++---- .../compile/internal/syntax/scanner_test.go | 54 +++++++++---------- test/fixedbugs/bug169.go | 2 +- test/fixedbugs/issue15611.go | 8 +-- test/fixedbugs/issue32133.go | 2 +- 5 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go index fef87171bc..f2f6fd2bb6 100644 --- a/src/cmd/compile/internal/syntax/scanner.go +++ b/src/cmd/compile/internal/syntax/scanner.go @@ -385,7 +385,7 @@ func (s *scanner) isIdentRune(c rune, first bool) bool { s.errorf("identifier cannot begin with digit %#U", c) } case c >= utf8.RuneSelf: - s.errorf("invalid identifier character %#U", c) + s.errorf("invalid character %#U in identifier", c) default: return false } @@ -612,13 +612,13 @@ func (s *scanner) rune() { if r == '\n' { s.ungetr() // assume newline is not part of literal if !s.bad { - s.errorf("newline in character literal") + 
s.errorf("newline in rune literal") } break } if r < 0 { if !s.bad { - s.errorAtf(0, "invalid character literal (missing closing ')") + s.errorAtf(0, "rune literal not terminated") } break } @@ -626,9 +626,9 @@ func (s *scanner) rune() { if !s.bad { if n == 0 { - s.errorf("empty character literal or unescaped ' in character literal") + s.errorf("empty rune literal or unescaped '") } else if n != 1 { - s.errorAtf(0, "invalid character literal (more than one character)") + s.errorAtf(0, "more than one character in rune literal") } } @@ -815,7 +815,7 @@ func (s *scanner) escape(quote rune) { if c < 0 { return // complain in caller about EOF } - s.errorf("unknown escape sequence") + s.errorf("unknown escape") return } @@ -836,7 +836,7 @@ func (s *scanner) escape(quote rune) { if base == 8 { kind = "octal" } - s.errorf("non-%s character in escape sequence: %c", kind, c) + s.errorf("invalid character %q in %s escape", c, kind) s.ungetr() return } @@ -847,11 +847,11 @@ func (s *scanner) escape(quote rune) { s.ungetr() if x > max && base == 8 { - s.errorf("octal escape value > 255: %d", x) + s.errorf("octal escape value %d > 255", x) return } if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ { - s.errorf("escape sequence is invalid Unicode code point %#U", x) + s.errorf("escape is invalid Unicode code point %#U", x) } } diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index 612c59507e..f683341650 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -596,10 +596,10 @@ func TestScanErrors(t *testing.T) { {"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0}, // token-level errors - {"\u00BD" /* ½ */, "invalid identifier character U+00BD '½'", 0, 0}, - {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid identifier character U+00BD '½'", 0, 13 /* byte offset */}, + {"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0}, + 
{"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */}, {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0}, - {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid identifier character U+00BD '½'", 0, 8 /* byte offset */}, + {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */}, {"x + ~y", "invalid character U+007E '~'", 0, 4}, {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3}, @@ -608,20 +608,20 @@ func TestScanErrors(t *testing.T) { {"0123456789e0 /*\nfoobar", "comment not terminated", 0, 13}, // valid float constant {"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12}, - {`''`, "empty character literal or unescaped ' in character literal", 0, 1}, - {"'\n", "newline in character literal", 0, 1}, - {`'\`, "invalid character literal (missing closing ')", 0, 0}, - {`'\'`, "invalid character literal (missing closing ')", 0, 0}, - {`'\x`, "invalid character literal (missing closing ')", 0, 0}, - {`'\x'`, "non-hex character in escape sequence: '", 0, 3}, - {`'\y'`, "unknown escape sequence", 0, 2}, - {`'\x0'`, "non-hex character in escape sequence: '", 0, 4}, - {`'\00'`, "non-octal character in escape sequence: '", 0, 4}, + {`''`, "empty rune literal or unescaped '", 0, 1}, + {"'\n", "newline in rune literal", 0, 1}, + {`'\`, "rune literal not terminated", 0, 0}, + {`'\'`, "rune literal not terminated", 0, 0}, + {`'\x`, "rune literal not terminated", 0, 0}, + {`'\x'`, "invalid character '\\'' in hex escape", 0, 3}, + {`'\y'`, "unknown escape", 0, 2}, + {`'\x0'`, "invalid character '\\'' in hex escape", 0, 4}, + {`'\00'`, "invalid character '\\'' in octal escape", 0, 4}, {`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape - {`'\378`, "non-octal character in escape sequence: 8", 0, 4}, - {`'\400'`, "octal escape value > 255: 256", 0, 5}, - {`'xx`, "invalid character literal (missing closing ')", 0, 0}, - 
{`'xx'`, "invalid character literal (more than one character)", 0, 0}, + {`'\378`, "invalid character '8' in octal escape", 0, 4}, + {`'\400'`, "octal escape value 256 > 255", 0, 5}, + {`'xx`, "rune literal not terminated", 0, 0}, + {`'xx'`, "more than one character in rune literal", 0, 0}, {"\n \"foo\n", "newline in string", 1, 7}, {`"`, "string not terminated", 0, 0}, @@ -633,20 +633,20 @@ func TestScanErrors(t *testing.T) { {`"\`, "string not terminated", 0, 0}, {`"\"`, "string not terminated", 0, 0}, {`"\x`, "string not terminated", 0, 0}, - {`"\x"`, "non-hex character in escape sequence: \"", 0, 3}, - {`"\y"`, "unknown escape sequence", 0, 2}, - {`"\x0"`, "non-hex character in escape sequence: \"", 0, 4}, - {`"\00"`, "non-octal character in escape sequence: \"", 0, 4}, + {`"\x"`, "invalid character '\"' in hex escape", 0, 3}, + {`"\y"`, "unknown escape", 0, 2}, + {`"\x0"`, "invalid character '\"' in hex escape", 0, 4}, + {`"\00"`, "invalid character '\"' in octal escape", 0, 4}, {`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape - {`"\378"`, "non-octal character in escape sequence: 8", 0, 4}, - {`"\400"`, "octal escape value > 255: 256", 0, 5}, + {`"\378"`, "invalid character '8' in octal escape", 0, 4}, + {`"\400"`, "octal escape value 256 > 255", 0, 5}, - {`s := "foo\z"`, "unknown escape sequence", 0, 10}, - {`s := "foo\z00\nbar"`, "unknown escape sequence", 0, 10}, + {`s := "foo\z"`, "unknown escape", 0, 10}, + {`s := "foo\z00\nbar"`, "unknown escape", 0, 10}, {`"\x`, "string not terminated", 0, 0}, - {`"\x"`, "non-hex character in escape sequence: \"", 0, 3}, - {`var s string = "\x"`, "non-hex character in escape sequence: \"", 0, 18}, - {`return "\Uffffffff"`, "escape sequence is invalid Unicode code point U+FFFFFFFF", 0, 18}, + {`"\x"`, "invalid character '\"' in hex escape", 0, 3}, + {`var s string = "\x"`, "invalid character '\"' in hex escape", 0, 18}, + {`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 
18}, {"0b.0", "invalid radix point in binary literal", 0, 2}, {"0x.p0\n", "hexadecimal literal has no digits", 0, 3}, diff --git a/test/fixedbugs/bug169.go b/test/fixedbugs/bug169.go index f63c2f3e1a..62ab7c2fa1 100644 --- a/test/fixedbugs/bug169.go +++ b/test/fixedbugs/bug169.go @@ -5,6 +5,6 @@ // license that can be found in the LICENSE file. package main -var x = '''; // ERROR "char" +var x = '''; // ERROR "char|rune" diff --git a/test/fixedbugs/issue15611.go b/test/fixedbugs/issue15611.go index 6a627d9b5e..3634475418 100644 --- a/test/fixedbugs/issue15611.go +++ b/test/fixedbugs/issue15611.go @@ -8,13 +8,13 @@ package p // These error messages are for the invalid literals on lines 19 and 20: -// ERROR "newline in character literal" -// ERROR "invalid character literal \(missing closing '\)" +// ERROR "newline in character literal|newline in rune literal" +// ERROR "invalid character literal \(missing closing '\)|rune literal not terminated" const ( - _ = '' // ERROR "empty character literal or unescaped ' in character literal" + _ = '' // ERROR "empty character literal or unescaped ' in character literal|empty rune literal" _ = 'f' - _ = 'foo' // ERROR "invalid character literal \(more than one character\)" + _ = 'foo' // ERROR "invalid character literal \(more than one character\)|more than one character in rune literal" //line issue15611.go:11 _ = ' _ = ' \ No newline at end of file diff --git a/test/fixedbugs/issue32133.go b/test/fixedbugs/issue32133.go index 13e4658a0f..f3cca87a72 100644 --- a/test/fixedbugs/issue32133.go +++ b/test/fixedbugs/issue32133.go @@ -8,7 +8,7 @@ package p // errors for the //line-adjusted code below // ERROR "newline in string" -// ERROR "newline in character literal" +// ERROR "newline in character literal|newline in rune literal" // ERROR "newline in string" // ERROR "string not terminated" From 585e31df63f6879c03b285711de6f9dcba1f2cb0 Mon Sep 17 00:00:00 2001 From: Ivan Trubach Date: Thu, 12 Dec 2019 13:33:42 +0000 Subject: 
[PATCH 61/69] cmd/doc: fix merging comments in -src mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These changes fix go doc -src mode that vomits comments from random files if filesystem does not sort files by name. The issue was with parse.ParseDir using the Readdir order of files, which varies between platforms and filesystem implementations. Another option is to merge comments using token.FileSet.Iterate order in cmd/doc, but since ParseDir is mostly used in go doc, I’ve opted for smaller change because it’s unlikely to break other uses or cause any performance issues. Example (macOS APFS): `go doc -src net.ListenPacket` Change-Id: I7f9f368c7d9ccd9a2cbc48665f2cb9798c7b3a3f GitHub-Last-Rev: 654fb450421266a0bb64518016944db22bd681e3 GitHub-Pull-Request: golang/go#36104 Reviewed-on: https://go-review.googlesource.com/c/go/+/210999 Run-TryBot: Rob Pike TryBot-Result: Gobot Gobot Reviewed-by: Rob Pike --- src/cmd/doc/doc_test.go | 34 ++++++++++++++++++++++++++++++++ src/cmd/doc/testdata/merge/aa.go | 7 +++++++ src/cmd/doc/testdata/merge/bb.go | 7 +++++++ src/go/parser/interface.go | 8 +------- 4 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 src/cmd/doc/testdata/merge/aa.go create mode 100644 src/cmd/doc/testdata/merge/bb.go diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go index c0959acca1..fd2ae30827 100644 --- a/src/cmd/doc/doc_test.go +++ b/src/cmd/doc/doc_test.go @@ -724,6 +724,40 @@ var tests = []test{ }, }, + // Merging comments with -src.
+ { + "merge comments with -src A", + []string{"-src", p + "/merge", `A`}, + []string{ + `A doc`, + `func A`, + `A comment`, + }, + []string{ + `Package A doc`, + `Package B doc`, + `B doc`, + `B comment`, + `B doc`, + }, + }, + { + "merge comments with -src B", + []string{"-src", p + "/merge", `B`}, + []string{ + `B doc`, + `func B`, + `B comment`, + }, + []string{ + `Package A doc`, + `Package B doc`, + `A doc`, + `A comment`, + `A doc`, + }, + }, + // No dups with -u. Issue 21797. { "case matching on, no dups", diff --git a/src/cmd/doc/testdata/merge/aa.go b/src/cmd/doc/testdata/merge/aa.go new file mode 100644 index 0000000000..f8ab92dfd0 --- /dev/null +++ b/src/cmd/doc/testdata/merge/aa.go @@ -0,0 +1,7 @@ +// Package comment A. +package merge + +// A doc. +func A() { + // A comment. +} diff --git a/src/cmd/doc/testdata/merge/bb.go b/src/cmd/doc/testdata/merge/bb.go new file mode 100644 index 0000000000..fd8cf3c446 --- /dev/null +++ b/src/cmd/doc/testdata/merge/bb.go @@ -0,0 +1,7 @@ +// Package comment B. +package merge + +// B doc. +func B() { + // B comment. +} diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go index 500c98d496..54f9d7b80a 100644 --- a/src/go/parser/interface.go +++ b/src/go/parser/interface.go @@ -133,13 +133,7 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) // first error encountered are returned. 
// func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) { - fd, err := os.Open(path) - if err != nil { - return nil, err - } - defer fd.Close() - - list, err := fd.Readdir(-1) + list, err := ioutil.ReadDir(path) if err != nil { return nil, err } From cc6a8bd0d7f782c31e1a35793b4e1253c6716ad5 Mon Sep 17 00:00:00 2001 From: Joel Sing Date: Mon, 2 Mar 2020 04:26:54 +1100 Subject: [PATCH 62/69] cmd/compile: add zero store operations for riscv64 This allows for zero stores to be performed using the zero register, rather than loading a separate register with zero. Change-Id: Ic81d8dbcdacbb2ca2c3f77682ff5ad7cdc33d18d Reviewed-on: https://go-review.googlesource.com/c/go/+/221684 Reviewed-by: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/riscv64/ssa.go | 7 + .../compile/internal/ssa/gen/RISCV64.rules | 18 ++ .../compile/internal/ssa/gen/RISCV64Ops.go | 19 +- src/cmd/compile/internal/ssa/opGen.go | 56 ++++ .../compile/internal/ssa/rewriteRISCV64.go | 264 ++++++++++++++++++ 5 files changed, 358 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 91f3164336..3fece75b1b 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -314,6 +314,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) + case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux(&p.To, v) case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules 
b/src/cmd/compile/internal/ssa/gen/RISCV64.rules index a19f8aa55b..9b88b56871 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules @@ -327,6 +327,14 @@ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> + (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> + (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> + (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> + (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> (MOVBUload [off1+off2] {sym} base mem) @@ -351,6 +359,10 @@ (MOVWstore [off1+off2] {sym} base val mem) (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} base val mem) +(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem) +(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem) +(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem) +(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem) // Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis // with 
OffPtr -> ADDI. @@ -461,6 +473,12 @@ // Absorb SNEZ into branch. (BNE (SNEZ x) yes no) -> (BNE x yes no) +// Store zero +(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem) +(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem) + // Fold ADD+MOVDconst into ADDI where possible. (ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr) diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go index 7829f9a07c..28a91d559f 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go @@ -106,12 +106,13 @@ func init() { callerSave := gpMask | fpMask | regNamed["g"] var ( - gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register - gp01 = regInfo{outputs: []regMask{gpMask}} - gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}} - gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}} - gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}} - gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}} + gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register + gpstore0 = regInfo{inputs: []regMask{gpspsbMask}} + gp01 = regInfo{outputs: []regMask{gpMask}} + gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}} + gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}} + gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}} + gp11sb = regInfo{inputs: 
[]regMask{gpspsbMask}, outputs: []regMask{gpMask}} fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}} fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}} @@ -171,6 +172,12 @@ func init() { {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits + // Stores: store of zero in arg0+auxint+aux; arg1=mem + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits + {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits + // Shift ops {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << aux1 {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> aux1, signed diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index fb887017cf..9da7376a8a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1917,6 +1917,10 @@ const ( OpRISCV64MOVHstore OpRISCV64MOVWstore OpRISCV64MOVDstore + OpRISCV64MOVBstorezero + OpRISCV64MOVHstorezero + OpRISCV64MOVWstorezero + OpRISCV64MOVDstorezero OpRISCV64SLL OpRISCV64SRA OpRISCV64SRL @@ -25483,6 +25487,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, { name: "SLL", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 6b3f4f70b5..676ca52e8e 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -386,24 +386,32 @@ func rewriteValueRISCV64(v *Value) bool { return rewriteValueRISCV64_OpRISCV64MOVBload(v) case OpRISCV64MOVBstore: return rewriteValueRISCV64_OpRISCV64MOVBstore(v) + case OpRISCV64MOVBstorezero: + return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v) case OpRISCV64MOVDconst: return rewriteValueRISCV64_OpRISCV64MOVDconst(v) case OpRISCV64MOVDload: return rewriteValueRISCV64_OpRISCV64MOVDload(v) case OpRISCV64MOVDstore: return rewriteValueRISCV64_OpRISCV64MOVDstore(v) + case OpRISCV64MOVDstorezero: + return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v) case 
OpRISCV64MOVHUload: return rewriteValueRISCV64_OpRISCV64MOVHUload(v) case OpRISCV64MOVHload: return rewriteValueRISCV64_OpRISCV64MOVHload(v) case OpRISCV64MOVHstore: return rewriteValueRISCV64_OpRISCV64MOVHstore(v) + case OpRISCV64MOVHstorezero: + return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v) case OpRISCV64MOVWUload: return rewriteValueRISCV64_OpRISCV64MOVWUload(v) case OpRISCV64MOVWload: return rewriteValueRISCV64_OpRISCV64MOVWload(v) case OpRISCV64MOVWstore: return rewriteValueRISCV64_OpRISCV64MOVWstore(v) + case OpRISCV64MOVWstorezero: + return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v) case OpRISCV64SUB: return rewriteValueRISCV64_OpRISCV64SUB(v) case OpRISCV64SUBW: @@ -2441,6 +2449,70 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { v.AddArg3(base, val, mem) return true } + // match: (MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVBstorezero [off1+off2] {sym} ptr mem) + for { 
+ off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool { @@ -2585,6 +2657,70 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool { v.AddArg3(base, val, mem) return true } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVDstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVDstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } return false } func 
rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { @@ -2732,6 +2868,70 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { v.AddArg3(base, val, mem) return true } + // match: (MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVHstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { @@ -2879,6 +3079,70 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { v.AddArg3(base, val, mem) return true } + // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := 
v.Aux + ptr := v_0 + if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) + // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { From 4de606b55f58d0b0e4121516cb4b514507b614da Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 26 Feb 2020 21:31:00 -0800 Subject: [PATCH 63/69] cmd/compile/internal/syntax: faster and simpler source reader This is one of several changes that were part of a larger rewrite which I made in early 2019 after switching to the new number literal syntax implementation. The purpose of the rewrite was to simplify reading of source code (Unicode character by character) and speed up the scanner but was never submitted for review due to other priorities. 
Part 3 of 3: This change contains a complete rewrite of source.go, the file that implements reading individual Unicode characters from the source. The new implementation is easier to use and has simpler literal buffer management, resulting in faster scanner and thus parser performance. Thew new source.go (internal) API is centered around nextch() which advances the scanner by one character. The scanner has been adjusted around nextch() and now consistently does one character look-ahead (there's no need for complicated ungetr-ing anymore). Only in one case backtrack is needed (when finding '..' rather than '...') and that case is now more cleanly solved with the new reset() function. Measuring line/s parsing peformance by running go test -run StdLib -fast -skip "syntax/(scanner|source)\.go" (best of 5 runs on "quiet" MacBook Pro, 3.3GHz Dual-Core i7, 16GB RAM, OS X 10.15.3) before and after shows consistently 3-5% improvement of line parsing speed: old: parsed 1788155 lines (3969 files) in 1.255520307s (1424234 lines/s) new: parsed 1788155 lines (3969 files) in 1.213197037s (1473919 lines/s) (scanner.go and parser.go are skipped because this CL changed those files.) 
Change-Id: Ida947f4b538d42eb2d2349062c69edb6c9e5ca66 Reviewed-on: https://go-review.googlesource.com/c/go/+/221603 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/syntax/parser.go | 2 +- src/cmd/compile/internal/syntax/scanner.go | 440 +++++++++--------- .../compile/internal/syntax/scanner_test.go | 4 +- src/cmd/compile/internal/syntax/source.go | 299 ++++++------ 4 files changed, 373 insertions(+), 372 deletions(-) diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index 469d9ad69b..5e52800b39 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -419,7 +419,7 @@ func (p *parser) fileOrNil() *File { } // p.tok == _EOF - f.Lines = p.source.line + f.Lines = p.line return f } diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go index f2f6fd2bb6..2ce6203dd9 100644 --- a/src/cmd/compile/internal/syntax/scanner.go +++ b/src/cmd/compile/internal/syntax/scanner.go @@ -6,9 +6,9 @@ // Go source. After initialization, consecutive calls of // next advance the scanner one token at a time. // -// This file, source.go, and tokens.go are self-contained -// (go tool compile scanner.go source.go tokens.go compiles) -// and thus could be made into its own package. +// This file, source.go, tokens.go, and token_string.go are self-contained +// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles) +// and thus could be made into their own package. 
package syntax @@ -86,20 +86,21 @@ func (s *scanner) next() { redo: // skip white space - c := s.getr() - for c == ' ' || c == '\t' || c == '\n' && !nlsemi || c == '\r' { - c = s.getr() + s.stop() + for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' { + s.nextch() } // token start - s.line, s.col = s.source.line0, s.source.col0 - - if isLetter(c) || c >= utf8.RuneSelf && s.isIdentRune(c, true) { + s.line, s.col = s.pos() + s.start() + if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) { + s.nextch() s.ident() return } - switch c { + switch s.ch { case -1: if nlsemi { s.lit = "EOF" @@ -109,11 +110,12 @@ redo: s.tok = _EOF case '\n': + s.nextch() s.lit = "newline" s.tok = _Semi case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - s.number(c) + s.number(false) case '"': s.stdString() @@ -125,97 +127,110 @@ redo: s.rune() case '(': + s.nextch() s.tok = _Lparen case '[': + s.nextch() s.tok = _Lbrack case '{': + s.nextch() s.tok = _Lbrace case ',': + s.nextch() s.tok = _Comma case ';': + s.nextch() s.lit = "semicolon" s.tok = _Semi case ')': + s.nextch() s.nlsemi = true s.tok = _Rparen case ']': + s.nextch() s.nlsemi = true s.tok = _Rbrack case '}': + s.nextch() s.nlsemi = true s.tok = _Rbrace case ':': - if s.getr() == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.tok = _Define break } - s.ungetr() s.tok = _Colon case '.': - c = s.getr() - if isDecimal(c) { - s.ungetr() - s.unread(1) // correct position of '.' (needed by startLit in number) - s.number('.') + s.nextch() + if isDecimal(s.ch) { + s.number(true) break } - if c == '.' { - c = s.getr() - if c == '.' { + if s.ch == '.' { + s.nextch() + if s.ch == '.' { + s.nextch() s.tok = _DotDotDot break } - s.unread(1) + s.rewind() // now s.ch holds 1st '.' + s.nextch() // consume 1st '.' 
again } - s.ungetr() s.tok = _Dot case '+': + s.nextch() s.op, s.prec = Add, precAdd - c = s.getr() - if c != '+' { + if s.ch != '+' { goto assignop } + s.nextch() s.nlsemi = true s.tok = _IncOp case '-': + s.nextch() s.op, s.prec = Sub, precAdd - c = s.getr() - if c != '-' { + if s.ch != '-' { goto assignop } + s.nextch() s.nlsemi = true s.tok = _IncOp case '*': + s.nextch() s.op, s.prec = Mul, precMul // don't goto assignop - want _Star token - if s.getr() == '=' { + if s.ch == '=' { + s.nextch() s.tok = _AssignOp break } - s.ungetr() s.tok = _Star case '/': - c = s.getr() - if c == '/' { + s.nextch() + if s.ch == '/' { + s.nextch() s.lineComment() goto redo } - if c == '*' { + if s.ch == '*' { + s.nextch() s.fullComment() - if s.source.line > s.line && nlsemi { + if line, _ := s.pos(); line > s.line && nlsemi { // A multi-line comment acts like a newline; // it translates to a ';' if nlsemi is set. s.lit = "newline" @@ -228,27 +243,29 @@ redo: goto assignop case '%': + s.nextch() s.op, s.prec = Rem, precMul - c = s.getr() goto assignop case '&': - c = s.getr() - if c == '&' { + s.nextch() + if s.ch == '&' { + s.nextch() s.op, s.prec = AndAnd, precAndAnd s.tok = _Operator break } s.op, s.prec = And, precMul - if c == '^' { + if s.ch == '^' { + s.nextch() s.op = AndNot - c = s.getr() } goto assignop case '|': - c = s.getr() - if c == '|' { + s.nextch() + if s.ch == '|' { + s.nextch() s.op, s.prec = OrOr, precOrOr s.tok = _Operator break @@ -257,106 +274,100 @@ redo: goto assignop case '^': + s.nextch() s.op, s.prec = Xor, precAdd - c = s.getr() goto assignop case '<': - c = s.getr() - if c == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Leq, precCmp s.tok = _Operator break } - if c == '<' { + if s.ch == '<' { + s.nextch() s.op, s.prec = Shl, precMul - c = s.getr() goto assignop } - if c == '-' { + if s.ch == '-' { + s.nextch() s.tok = _Arrow break } - s.ungetr() s.op, s.prec = Lss, precCmp s.tok = _Operator case '>': - c = s.getr() - if c == 
'=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Geq, precCmp s.tok = _Operator break } - if c == '>' { + if s.ch == '>' { + s.nextch() s.op, s.prec = Shr, precMul - c = s.getr() goto assignop } - s.ungetr() s.op, s.prec = Gtr, precCmp s.tok = _Operator case '=': - if s.getr() == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Eql, precCmp s.tok = _Operator break } - s.ungetr() s.tok = _Assign case '!': - if s.getr() == '=' { + s.nextch() + if s.ch == '=' { + s.nextch() s.op, s.prec = Neq, precCmp s.tok = _Operator break } - s.ungetr() s.op, s.prec = Not, 0 s.tok = _Operator default: - s.tok = 0 - s.errorf("invalid character %#U", c) + s.errorf("invalid character %#U", s.ch) + s.nextch() goto redo } return assignop: - if c == '=' { + if s.ch == '=' { + s.nextch() s.tok = _AssignOp return } - s.ungetr() s.tok = _Operator } -func isLetter(c rune) bool { - return 'a' <= lower(c) && lower(c) <= 'z' || c == '_' -} - func (s *scanner) ident() { - s.startLit() - // accelerate common case (7bit ASCII) - c := s.getr() - for isLetter(c) || isDecimal(c) { - c = s.getr() + for isLetter(s.ch) || isDecimal(s.ch) { + s.nextch() } // general case - if c >= utf8.RuneSelf { - for s.isIdentRune(c, false) { - c = s.getr() + if s.ch >= utf8.RuneSelf { + for s.atIdentChar(false) { + s.nextch() } } - s.ungetr() - - lit := s.stopLit() // possibly a keyword + lit := s.segment() if len(lit) >= 2 { if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) { s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok) @@ -376,16 +387,16 @@ func tokStrFast(tok token) string { return _token_name[_token_index[tok-1]:_token_index[tok]] } -func (s *scanner) isIdentRune(c rune, first bool) bool { +func (s *scanner) atIdentChar(first bool) bool { switch { - case unicode.IsLetter(c) || c == '_': + case unicode.IsLetter(s.ch) || s.ch == '_': // ok - case unicode.IsDigit(c): + case unicode.IsDigit(s.ch): if first { - 
s.errorf("identifier cannot begin with digit %#U", c) + s.errorf("identifier cannot begin with digit %#U", s.ch) } - case c >= utf8.RuneSelf: - s.errorf("invalid character %#U in identifier", c) + case s.ch >= utf8.RuneSelf: + s.errorf("invalid character %#U in identifier", s.ch) default: return false } @@ -411,46 +422,45 @@ func init() { } } -func lower(c rune) rune { return ('a' - 'A') | c } // returns lower-case c iff c is ASCII letter -func isDecimal(c rune) bool { return '0' <= c && c <= '9' } -func isHex(c rune) bool { return '0' <= c && c <= '9' || 'a' <= lower(c) && lower(c) <= 'f' } +func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter +func isLetter(ch rune) bool { return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' } +func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } +func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' } -// digits accepts the sequence { digit | '_' } starting with c0. +// digits accepts the sequence { digit | '_' }. // If base <= 10, digits accepts any decimal digit but records // the index (relative to the literal start) of a digit >= base // in *invalid, if *invalid < 0. -// digits returns the first rune that is not part of the sequence -// anymore, and a bitset describing whether the sequence contained +// digits returns a bitset describing whether the sequence contained // digits (bit 0 is set), or separators '_' (bit 1 is set). 
-func (s *scanner) digits(c0 rune, base int, invalid *int) (c rune, digsep int) { - c = c0 +func (s *scanner) digits(base int, invalid *int) (digsep int) { if base <= 10 { max := rune('0' + base) - for isDecimal(c) || c == '_' { + for isDecimal(s.ch) || s.ch == '_' { ds := 1 - if c == '_' { + if s.ch == '_' { ds = 2 - } else if c >= max && *invalid < 0 { - *invalid = int(s.col0 - s.col) // record invalid rune index + } else if s.ch >= max && *invalid < 0 { + _, col := s.pos() + *invalid = int(col - s.col) // record invalid rune index } digsep |= ds - c = s.getr() + s.nextch() } } else { - for isHex(c) || c == '_' { + for isHex(s.ch) || s.ch == '_' { ds := 1 - if c == '_' { + if s.ch == '_' { ds = 2 } digsep |= ds - c = s.getr() + s.nextch() } } return } -func (s *scanner) number(c rune) { - s.startLit() +func (s *scanner) number(seenPoint bool) { s.bad = false base := 10 // number base @@ -459,38 +469,39 @@ func (s *scanner) number(c rune) { invalid := -1 // index of invalid digit in literal, or < 0 // integer part - var ds int - if c != '.' { + if !seenPoint { s.kind = IntLit - if c == '0' { - c = s.getr() - switch lower(c) { + if s.ch == '0' { + s.nextch() + switch lower(s.ch) { case 'x': - c = s.getr() + s.nextch() base, prefix = 16, 'x' case 'o': - c = s.getr() + s.nextch() base, prefix = 8, 'o' case 'b': - c = s.getr() + s.nextch() base, prefix = 2, 'b' default: base, prefix = 8, '0' digsep = 1 // leading 0 } } - c, ds = s.digits(c, base, &invalid) - digsep |= ds + digsep |= s.digits(base, &invalid) + if s.ch == '.' { + if prefix == 'o' || prefix == 'b' { + s.errorf("invalid radix point in %s", litname(prefix)) + } + s.nextch() + seenPoint = true + } } // fractional part - if c == '.' 
{ + if seenPoint { s.kind = FloatLit - if prefix == 'o' || prefix == 'b' { - s.errorf("invalid radix point in %s", litname(prefix)) - } - c, ds = s.digits(s.getr(), base, &invalid) - digsep |= ds + digsep |= s.digits(base, &invalid) } if digsep&1 == 0 && !s.bad { @@ -498,23 +509,22 @@ func (s *scanner) number(c rune) { } // exponent - if e := lower(c); e == 'e' || e == 'p' { + if e := lower(s.ch); e == 'e' || e == 'p' { if !s.bad { switch { case e == 'e' && prefix != 0 && prefix != '0': - s.errorf("%q exponent requires decimal mantissa", c) + s.errorf("%q exponent requires decimal mantissa", s.ch) case e == 'p' && prefix != 'x': - s.errorf("%q exponent requires hexadecimal mantissa", c) + s.errorf("%q exponent requires hexadecimal mantissa", s.ch) } } - c = s.getr() + s.nextch() s.kind = FloatLit - if c == '+' || c == '-' { - c = s.getr() + if s.ch == '+' || s.ch == '-' { + s.nextch() } - c, ds = s.digits(c, 10, nil) - digsep |= ds - if ds&1 == 0 && !s.bad { + digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit + if digsep&1 == 0 && !s.bad { s.errorf("exponent has no digits") } } else if prefix == 'x' && s.kind == FloatLit && !s.bad { @@ -522,14 +532,13 @@ func (s *scanner) number(c rune) { } // suffix 'i' - if c == 'i' { + if s.ch == 'i' { s.kind = ImagLit - c = s.getr() + s.nextch() } - s.ungetr() s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.tok = _Literal if s.kind == IntLit && invalid >= 0 && !s.bad { @@ -596,199 +605,195 @@ func invalidSep(x string) int { } func (s *scanner) rune() { - s.startLit() s.bad = false + s.nextch() n := 0 for ; ; n++ { - r := s.getr() - if r == '\'' { + if s.ch == '\'' { + if !s.bad { + if n == 0 { + s.errorf("empty rune literal or unescaped '") + } else if n != 1 { + s.errorAtf(0, "more than one character in rune literal") + } + } + s.nextch() break } - if r == '\\' { + if s.ch == '\\' { + s.nextch() s.escape('\'') continue } - if r == '\n' { - s.ungetr() // assume newline is not part of 
literal + if s.ch == '\n' { if !s.bad { s.errorf("newline in rune literal") } break } - if r < 0 { + if s.ch < 0 { if !s.bad { s.errorAtf(0, "rune literal not terminated") } break } - } - - if !s.bad { - if n == 0 { - s.errorf("empty rune literal or unescaped '") - } else if n != 1 { - s.errorAtf(0, "more than one character in rune literal") - } + s.nextch() } s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.kind = RuneLit s.tok = _Literal } func (s *scanner) stdString() { - s.startLit() s.bad = false + s.nextch() for { - r := s.getr() - if r == '"' { + if s.ch == '"' { + s.nextch() break } - if r == '\\' { + if s.ch == '\\' { + s.nextch() s.escape('"') continue } - if r == '\n' { - s.ungetr() // assume newline is not part of literal + if s.ch == '\n' { s.errorf("newline in string") break } - if r < 0 { + if s.ch < 0 { s.errorAtf(0, "string not terminated") break } + s.nextch() } s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.kind = StringLit s.tok = _Literal } func (s *scanner) rawString() { - s.startLit() s.bad = false + s.nextch() for { - r := s.getr() - if r == '`' { + if s.ch == '`' { + s.nextch() break } - if r < 0 { + if s.ch < 0 { s.errorAtf(0, "string not terminated") break } + s.nextch() } // We leave CRs in the string since they are part of the // literal (even though they are not part of the literal // value). 
s.nlsemi = true - s.lit = string(s.stopLit()) + s.lit = string(s.segment()) s.kind = StringLit s.tok = _Literal } func (s *scanner) comment(text string) { - s.errh(s.line, s.col, text) + s.errorAtf(0, text) } -func (s *scanner) skipLine(r rune) { - for r >= 0 { - if r == '\n' { - s.ungetr() // don't consume '\n' - needed for nlsemi logic - break - } - r = s.getr() +func (s *scanner) skipLine() { + // don't consume '\n' - needed for nlsemi logic + for s.ch >= 0 && s.ch != '\n' { + s.nextch() } } func (s *scanner) lineComment() { - r := s.getr() + // opening has already been consumed if s.mode&comments != 0 { - s.startLit() - s.skipLine(r) - s.comment("//" + string(s.stopLit())) + s.skipLine() + s.comment(string(s.segment())) return } // directives must start at the beginning of the line (s.col == colbase) - if s.mode&directives == 0 || s.col != colbase || (r != 'g' && r != 'l') { - s.skipLine(r) + if s.mode&directives == 0 || s.col != colbase || (s.ch != 'g' && s.ch != 'l') { + s.stop() + s.skipLine() return } // recognize go: or line directives prefix := "go:" - if r == 'l' { + if s.ch == 'l' { prefix = "line " } for _, m := range prefix { - if r != m { - s.skipLine(r) + if s.ch != m { + s.stop() + s.skipLine() return } - r = s.getr() + s.nextch() } // directive text - s.startLit() - s.skipLine(r) - s.comment("//" + prefix + string(s.stopLit())) + s.skipLine() + s.comment(string(s.segment())) } -func (s *scanner) skipComment(r rune) bool { - for r >= 0 { - for r == '*' { - r = s.getr() - if r == '/' { +func (s *scanner) skipComment() bool { + for s.ch >= 0 { + for s.ch == '*' { + s.nextch() + if s.ch == '/' { + s.nextch() return true } } - r = s.getr() + s.nextch() } s.errorAtf(0, "comment not terminated") return false } func (s *scanner) fullComment() { - r := s.getr() + /* opening has already been consumed */ if s.mode&comments != 0 { - s.startLit() - if s.skipComment(r) { - s.comment("/*" + string(s.stopLit())) - } else { - s.killLit() // not a complete comment 
- ignore + if s.skipComment() { + s.comment(string(s.segment())) } return } - if s.mode&directives == 0 || r != 'l' { - s.skipComment(r) + if s.mode&directives == 0 || s.ch != 'l' { + s.stop() + s.skipComment() return } // recognize line directive const prefix = "line " for _, m := range prefix { - if r != m { - s.skipComment(r) + if s.ch != m { + s.stop() + s.skipComment() return } - r = s.getr() + s.nextch() } // directive text - s.startLit() - if s.skipComment(r) { - s.comment("/*" + prefix + string(s.stopLit())) - } else { - s.killLit() // not a complete comment - ignore + if s.skipComment() { + s.comment(string(s.segment())) } } @@ -796,23 +801,23 @@ func (s *scanner) escape(quote rune) { var n int var base, max uint32 - c := s.getr() - switch c { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + switch s.ch { + case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\': + s.nextch() return case '0', '1', '2', '3', '4', '5', '6', '7': n, base, max = 3, 8, 255 case 'x': - c = s.getr() + s.nextch() n, base, max = 2, 16, 255 case 'u': - c = s.getr() + s.nextch() n, base, max = 4, 16, unicode.MaxRune case 'U': - c = s.getr() + s.nextch() n, base, max = 8, 16, unicode.MaxRune default: - if c < 0 { + if s.ch < 0 { return // complain in caller about EOF } s.errorf("unknown escape") @@ -821,30 +826,27 @@ func (s *scanner) escape(quote rune) { var x uint32 for i := n; i > 0; i-- { + if s.ch < 0 { + return // complain in caller about EOF + } d := base - switch { - case isDecimal(c): - d = uint32(c) - '0' - case 'a' <= lower(c) && lower(c) <= 'f': - d = uint32(lower(c)) - ('a' - 10) + if isDecimal(s.ch) { + d = uint32(s.ch) - '0' + } else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' { + d = uint32(lower(s.ch)) - 'a' + 10 } if d >= base { - if c < 0 { - return // complain in caller about EOF - } kind := "hex" if base == 8 { kind = "octal" } - s.errorf("invalid character %q in %s escape", c, kind) - s.ungetr() + s.errorf("invalid character %q in %s escape", s.ch, kind) 
return } // d < base x = x*base + d - c = s.getr() + s.nextch() } - s.ungetr() if x > max && base == 8 { s.errorf("octal escape value %d > 255", x) diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index f683341650..78e470c45c 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -19,8 +19,8 @@ func errh(line, col uint, msg string) { // Don't bother with other tests if TestSmoke doesn't pass. func TestSmoke(t *testing.T) { - const src = "if (+foo\t+=..123/***/4.2_0e-0i'a'`raw`\"string\" ;//$" - tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Semi, _EOF} + const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$" + tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF} var got scanner got.init(strings.NewReader(src), errh, 0) diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go index c671e3c11e..01b592152b 100644 --- a/src/cmd/compile/internal/syntax/source.go +++ b/src/cmd/compile/internal/syntax/source.go @@ -3,11 +3,10 @@ // license that can be found in the LICENSE file. // This file implements source, a buffered rune reader -// which is specialized for the needs of the Go scanner: -// Contiguous sequences of runes (literals) are extracted -// directly as []byte without the need to re-encode the -// runes in UTF-8 (as would be necessary with bufio.Reader). -// +// specialized for scanning Go code: Reading +// ASCII characters, maintaining current (line, col) +// position information, and recording of the most +// recently read source segment are highly optimized. // This file is self-contained (go tool compile source.go // compiles) and thus could be made into its own package. 
@@ -18,202 +17,202 @@ import ( "unicode/utf8" ) +// The source buffer is accessed using three indices b (begin), +// r (read), and e (end): +// +// - If b >= 0, it points to the beginning of a segment of most +// recently read characters (typically a Go literal). +// +// - r points to the byte immediately following the most recently +// read character ch, which starts at r-chw. +// +// - e points to the byte immediately following the last byte that +// was read into the buffer. +// +// The buffer content is terminated at buf[e] with the sentinel +// character utf8.RuneSelf. This makes it possible to test for +// the common case of ASCII characters with a single 'if' (see +// nextch method). +// +// +------ content in use -------+ +// v v +// buf [...read...|...segment...|ch|...unread...|s|...free...] +// ^ ^ ^ ^ +// | | | | +// b r-chw r e +// +// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel + +type source struct { + in io.Reader + errh func(line, col uint, msg string) + + buf []byte // source buffer + ioerr error // pending I/O error, or nil + b, r, e int // buffer indices (see comment above) + line, col uint // source position of ch (0-based) + ch rune // most recently read character + chw int // width of ch +} + +const sentinel = utf8.RuneSelf + +func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) { + s.in = in + s.errh = errh + + if s.buf == nil { + s.buf = make([]byte, nextSize(0)) + } + s.buf[0] = sentinel + s.ioerr = nil + s.b, s.r, s.e = -1, 0, 0 + s.line, s.col = 0, 0 + s.ch = ' ' + s.chw = 0 +} + // starting points for line and column numbers const linebase = 1 const colbase = 1 -// max. number of bytes to unread -const maxunread = 10 - -// buf [...read...|...|...unread...|s|...free...] 
-// ^ ^ ^ ^ -// | | | | -// suf r0 r w - -type source struct { - src io.Reader - errh func(line, pos uint, msg string) - - // source buffer - buf [4 << 10]byte - r0, r, w int // previous/current read and write buf positions, excluding sentinel - line0, line uint // previous/current line - col0, col uint // previous/current column (byte offsets from line start) - ioerr error // pending io error - - // literal buffer - lit []byte // literal prefix - suf int // literal suffix; suf >= 0 means we are scanning a literal -} - -// init initializes source to read from src and to report errors via errh. -// errh must not be nil. -func (s *source) init(src io.Reader, errh func(line, pos uint, msg string)) { - s.src = src - s.errh = errh - - s.buf[0] = utf8.RuneSelf // terminate with sentinel - s.r0, s.r, s.w = 0, 0, 0 - s.line0, s.line = 0, linebase - s.col0, s.col = 0, colbase - s.ioerr = nil - - s.lit = s.lit[:0] - s.suf = -1 -} - -// ungetr sets the reading position to a previous reading -// position, usually the one of the most recently read -// rune, but possibly earlier (see unread below). -func (s *source) ungetr() { - s.r, s.line, s.col = s.r0, s.line0, s.col0 -} - -// unread moves the previous reading position to a position -// that is n bytes earlier in the source. The next ungetr -// call will set the reading position to that moved position. -// The "unread" runes must be single byte and not contain any -// newlines; and 0 <= n <= maxunread must hold. -func (s *source) unread(n int) { - s.r0 -= n - s.col0 -= uint(n) +// pos returns the (line, col) source position of s.ch. +func (s *source) pos() (line, col uint) { + return linebase + s.line, colbase + s.col } +// error reports the error msg at source position s.pos(). func (s *source) error(msg string) { - s.errh(s.line0, s.col0, msg) + line, col := s.pos() + s.errh(line, col, msg) } -// getr reads and returns the next rune. 
-// -// If a read or source encoding error occurs, getr -// calls the error handler installed with init. -// The handler must exist. -// -// The (line, col) position passed to the error handler -// is always at the current source reading position. -func (s *source) getr() rune { +// start starts a new active source segment (including s.ch). +// As long as stop has not been called, the active segment's +// bytes (excluding s.ch) may be retrieved by calling segment. +func (s *source) start() { s.b = s.r - s.chw } +func (s *source) stop() { s.b = -1 } +func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] } + +// rewind rewinds the scanner's read position and character s.ch +// to the start of the currently active segment, which must not +// contain any newlines (otherwise position information will be +// incorrect). Currently, rewind is only needed for handling the +// source sequence ".."; it must not be called outside an active +// segment. +func (s *source) rewind() { + // ok to verify precondition - rewind is rarely called + if s.b < 0 { + panic("no active segment") + } + s.col -= uint(s.r - s.b) + s.r = s.b + s.nextch() +} + +func (s *source) nextch() { redo: - s.r0, s.line0, s.col0 = s.r, s.line, s.col - - // We could avoid at least one test that is always taken in the - // for loop below by duplicating the common case code (ASCII) - // here since we always have at least the sentinel (utf8.RuneSelf) - // in the buffer. Measure and optimize if necessary. 
- - // make sure we have at least one rune in buffer, or we are at EOF - for s.r+utf8.UTFMax > s.w && !utf8.FullRune(s.buf[s.r:s.w]) && s.ioerr == nil && s.w-s.r < len(s.buf) { - s.fill() // s.w-s.r < len(s.buf) => buffer is not full + s.col += uint(s.chw) + if s.ch == '\n' { + s.line++ + s.col = 0 } - // common case: ASCII and enough bytes - // (invariant: s.buf[s.w] == utf8.RuneSelf) - if b := s.buf[s.r]; b < utf8.RuneSelf { + // fast common case: at least one ASCII character + if s.ch = rune(s.buf[s.r]); s.ch < sentinel { s.r++ - // TODO(gri) Optimization: Instead of adjusting s.col for each character, - // remember the line offset instead and then compute the offset as needed - // (which is less often). - s.col++ - if b == 0 { + s.chw = 1 + if s.ch == 0 { s.error("invalid NUL character") goto redo } - if b == '\n' { - s.line++ - s.col = colbase - } - return rune(b) + return + } + + // slower general case: add more bytes to buffer if we don't have a full rune + for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil { + s.fill() } // EOF - if s.r == s.w { + if s.r == s.e { if s.ioerr != io.EOF { // ensure we never start with a '/' (e.g., rooted path) in the error message s.error("I/O error: " + s.ioerr.Error()) + s.ioerr = nil } - return -1 + s.ch = -1 + s.chw = 0 + return } - // uncommon case: not ASCII - r, w := utf8.DecodeRune(s.buf[s.r:s.w]) - s.r += w - s.col += uint(w) + s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e]) + s.r += s.chw - if r == utf8.RuneError && w == 1 { + if s.ch == utf8.RuneError && s.chw == 1 { s.error("invalid UTF-8 encoding") goto redo } // BOM's are only allowed as the first character in a file const BOM = 0xfeff - if r == BOM { - if s.r0 > 0 { // s.r0 is always > 0 after 1st character (fill will set it to maxunread) + if s.ch == BOM { + if s.line > 0 || s.col > 0 { s.error("invalid BOM in the middle of the file") } goto redo } - - return r } +// fill reads more source bytes into s.buf. 
+// It returns with at least one more byte in the buffer, or with s.ioerr != nil. func (s *source) fill() { - // Slide unread bytes to beginning but preserve last read char - // (for one ungetr call) plus maxunread extra bytes (for one - // unread call). - if s.r0 > maxunread { - n := s.r0 - maxunread // number of bytes to slide down - // save literal prefix, if any - // (make sure we keep maxunread bytes and the last - // read char in the buffer) - if s.suf >= 0 { - // we have a literal - if s.suf < n { - // save literal prefix - s.lit = append(s.lit, s.buf[s.suf:n]...) - s.suf = 0 - } else { - s.suf -= n - } - } - copy(s.buf[:], s.buf[n:s.w]) - s.r0 = maxunread // eqv: s.r0 -= n - s.r -= n - s.w -= n + // determine content to preserve + b := s.r + if s.b >= 0 { + b = s.b + s.b = 0 // after buffer has grown or content has been moved down } + content := s.buf[b:s.e] + + // grow buffer or move content down + if len(content)*2 > len(s.buf) { + s.buf = make([]byte, nextSize(len(s.buf))) + copy(s.buf, content) + } else if b > 0 { + copy(s.buf, content) + } + s.r -= b + s.e -= b // read more data: try a limited number of times - for i := 100; i > 0; i-- { - n, err := s.src.Read(s.buf[s.w : len(s.buf)-1]) // -1 to leave space for sentinel + for i := 0; i < 10; i++ { + var n int + n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel if n < 0 { panic("negative read") // incorrect underlying io.Reader implementation } - s.w += n - if n > 0 || err != nil { - s.buf[s.w] = utf8.RuneSelf // sentinel - if err != nil { - s.ioerr = err - } + if n > 0 || s.ioerr != nil { + s.e += n + s.buf[s.e] = sentinel return } + // n == 0 } - s.buf[s.w] = utf8.RuneSelf // sentinel + s.buf[s.e] = sentinel s.ioerr = io.ErrNoProgress } -func (s *source) startLit() { - s.suf = s.r0 - s.lit = s.lit[:0] // reuse lit -} - -func (s *source) stopLit() []byte { - lit := s.buf[s.suf:s.r] - if len(s.lit) > 0 { - lit = append(s.lit, lit...) 
+// nextSize returns the next bigger size for a buffer of a given size. +func nextSize(size int) int { + const min = 4 << 10 // 4K: minimum buffer size + const max = 1 << 20 // 1M: maximum buffer size which is still doubled + if size < min { + return min } - s.killLit() - return lit -} - -func (s *source) killLit() { - s.suf = -1 // no pending literal + if size <= max { + return size << 1 + } + return size + max } From bda42a7a782dbcf4b123d617c5b60f3c848cbb82 Mon Sep 17 00:00:00 2001 From: Diogo Pinela Date: Thu, 5 Mar 2020 00:28:05 +0000 Subject: [PATCH 64/69] runtime: use staticuint64s instead of staticbytes for 1-length strings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was the last remaining use of staticbytes, so we can now delete it. The new code appears slightly faster on amd64: name old time/op new time/op delta SliceByteToString/1-4 6.29ns ± 2% 5.89ns ± 1% -6.46% (p=0.000 n=14+14) This may not be the case on the big-endian architectures, since they have to do an extra addition. Updates #37612 Change-Id: Icb84c5911ba025f798de152849992a55be99e4f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/221979 Reviewed-by: Josh Bleecher Snyder Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot --- src/runtime/iface.go | 36 ------------------------------------ src/runtime/string.go | 13 ++++++------- 2 files changed, 6 insertions(+), 43 deletions(-) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 892e5a400f..e4b0b6d3d3 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -527,42 +527,6 @@ func iterate_itabs(fn func(*itab)) { } } -// staticbytes is used to avoid convT2E for byte-sized values. 
-var staticbytes = [...]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, - 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, - 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, - 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, - 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, - 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, - 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, - 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, - 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, - 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, - 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, - 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, - 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, - 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, - 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, - 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, - 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, - 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, - 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, - 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, - 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, - 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, -} - // staticuint64s is used to avoid allocating in convTx for small integer values. 
var staticuint64s = [...]uint64{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, diff --git a/src/runtime/string.go b/src/runtime/string.go index 184245b105..7dc0bd789f 100644 --- a/src/runtime/string.go +++ b/src/runtime/string.go @@ -6,6 +6,7 @@ package runtime import ( "internal/bytealg" + "runtime/internal/sys" "unsafe" ) @@ -90,7 +91,11 @@ func slicebytetostring(buf *tmpBuf, b []byte) (str string) { msanread(unsafe.Pointer(&b[0]), uintptr(l)) } if l == 1 { - stringStructOf(&str).str = unsafe.Pointer(&staticbytes[b[0]]) + p := unsafe.Pointer(&staticuint64s[b[0]]) + if sys.BigEndian { + p = add(p, 7) + } + stringStructOf(&str).str = p stringStructOf(&str).len = 1 return } @@ -231,12 +236,6 @@ func stringStructOf(sp *string) *stringStruct { } func intstring(buf *[4]byte, v int64) (s string) { - if v >= 0 && v < runeSelf { - stringStructOf(&s).str = unsafe.Pointer(&staticbytes[v]) - stringStructOf(&s).len = 1 - return - } - var b []byte if buf != nil { b = buf[:] From 55d4cbfbe10c11e56e1642cbb6b108eaf2620e09 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Thu, 5 Mar 2020 12:55:44 -0800 Subject: [PATCH 65/69] cmd/compile/internal/scanner: report correct directive string (fix build) Change-Id: I01b244e97e4140545a46b3d494489a30126c2139 Reviewed-on: https://go-review.googlesource.com/c/go/+/222257 Run-TryBot: Robert Griesemer Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/syntax/scanner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go index 2ce6203dd9..fc2efcced2 100644 --- a/src/cmd/compile/internal/syntax/scanner.go +++ b/src/cmd/compile/internal/syntax/scanner.go @@ -704,7 +704,7 @@ func (s *scanner) rawString() { } func (s *scanner) comment(text string) { - s.errorAtf(0, text) + s.errorAtf(0, "%s", text) } func (s *scanner) skipLine() { From 4a70ff425b3c16c19785b04bb89ca856749ed65b Mon Sep 17 00:00:00 2001 From: "Bryan C. 
Mills" Date: Thu, 5 Mar 2020 11:14:25 -0500 Subject: [PATCH 66/69] cmd/go/internal/renameio: skip test affected by kernel bug on macOS 10.14 builders The test will remain flaky on the -nocgo builder until #37695 is addressed. Updates #37695 Fixes #33041 Change-Id: I5d661ef39e82ab1dce3a76e0e4059cf556135e89 Reviewed-on: https://go-review.googlesource.com/c/go/+/222158 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod --- src/cmd/go/internal/renameio/renameio_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/cmd/go/internal/renameio/renameio_test.go b/src/cmd/go/internal/renameio/renameio_test.go index ee2f3ba1bb..df8ddabdb8 100644 --- a/src/cmd/go/internal/renameio/renameio_test.go +++ b/src/cmd/go/internal/renameio/renameio_test.go @@ -9,11 +9,13 @@ package renameio import ( "encoding/binary" "errors" + "internal/testenv" "io/ioutil" "math/rand" "os" "path/filepath" "runtime" + "strings" "sync" "sync/atomic" "syscall" @@ -24,6 +26,10 @@ import ( ) func TestConcurrentReadsAndWrites(t *testing.T) { + if runtime.GOOS == "darwin" && strings.HasSuffix(testenv.Builder(), "-10_14") { + testenv.SkipFlaky(t, 33041) + } + dir, err := ioutil.TempDir("", "renameio") if err != nil { t.Fatal(err) From fadbf7404d2b1aca63993e289448fcc3b6a23107 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Fri, 28 Feb 2020 14:16:41 -0500 Subject: [PATCH 67/69] runtime/pprof: expand final stack frame to avoid truncation When generating stacks, the runtime automatically expands inline functions to inline all inline frames in the stack. However, due to the stack size limit, the final frame may be truncated in the middle of several inline frames at the same location. As-is, we assume that the final frame is a normal function, and emit and cache a Location for it. 
If we later receive a complete stack frame, we will first use the cached Location for the inlined function and then generate a new Location for the "caller" frame, in violation of the pprof requirement to merge inlined functions into the same Location. As a result, we: 1. Nondeterministically may generate a profile with the different stacks combined or split, depending on which is encountered first. This is particularly problematic when performing a diff of profiles. 2. When split stacks are generated, we lose the inlining information. We avoid both of these problems by performing a second expansion of the last stack frame to recover additional inline frames that may have been lost. This expansion is a bit simpler than the one done by the runtime because we don't have to handle skipping, and we know that the last emitted frame is not an elided wrapper, since it by definition is already included in the stack. Fixes #37446 Change-Id: If3ca2af25b21d252cf457cc867dd932f107d4c61 Reviewed-on: https://go-review.googlesource.com/c/go/+/221577 Run-TryBot: Michael Pratt TryBot-Result: Gobot Gobot Reviewed-by: Heschi Kreinick Reviewed-by: Keith Randall Reviewed-by: Hyang-Ah Hana Kim --- src/runtime/pprof/map.go | 3 +- src/runtime/pprof/pprof_test.go | 47 ++++++++++++++++++++++++----- src/runtime/pprof/proto.go | 40 +++++++++++-------------- src/runtime/pprof/runtime.go | 3 ++ src/runtime/symtab.go | 53 +++++++++++++++++++++++++++++++++ 5 files changed, 115 insertions(+), 31 deletions(-) diff --git a/src/runtime/pprof/map.go b/src/runtime/pprof/map.go index a271ad022e..7c75872351 100644 --- a/src/runtime/pprof/map.go +++ b/src/runtime/pprof/map.go @@ -68,7 +68,8 @@ Search: if len(m.freeStk) < len(stk) { m.freeStk = make([]uintptr, 1024) } - e.stk = m.freeStk[:len(stk)] + // Limit cap to prevent append from clobbering freeStk. 
+ e.stk = m.freeStk[:len(stk):len(stk)] m.freeStk = m.freeStk[len(stk):] for j := range stk { diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 20b44e1e01..5bfc3b6134 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -1172,16 +1172,25 @@ func TestTryAdd(t *testing.T) { {Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}}, }, }, { - name: "recursive_inlined_funcs", + // If a function is called recursively then it must not be + // inlined in the caller. + // + // N.B. We're generating an impossible profile here, with a + // recursive inlineCallee call. This is simulating a non-Go + // function that looks like an inlined Go function other than + // its recursive property. See pcDeck.tryAdd. + name: "recursive_func_is_not_inlined", input: []uint64{ 3, 0, 500, // hz = 500. Must match the period. 5, 0, 30, inlinedCalleePtr, inlinedCalleePtr, 4, 0, 40, inlinedCalleePtr, }, - wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}}, + // inlinedCaller shows up here because + // runtime_expandFinalInlineFrame adds it to the stack frame. + wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}}, wantSamples: []*profile.Sample{ - {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}}, - {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}}, + {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}}, + {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}}, }, }, { name: "truncated_stack_trace_later", @@ -1202,12 +1211,36 @@ func TestTryAdd(t *testing.T) { 4, 0, 70, inlinedCalleePtr, 5, 0, 80, inlinedCalleePtr, inlinedCallerPtr, }, - wantLocs: [][]string{ // the inline info is screwed up, but better than a crash. 
- {"runtime/pprof.inlinedCallee"}, + wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, + {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // We can recover the inlined caller from a truncated stack. + name: "truncated_stack_trace_only", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. + 4, 0, 70, inlinedCalleePtr, + }, + wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // The same location is used for duplicated stacks. + name: "truncated_stack_trace_twice", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. + 4, 0, 70, inlinedCalleePtr, + 5, 0, 80, inlinedCallerPtr, inlinedCalleePtr, + }, + wantLocs: [][]string{ + {"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, {"runtime/pprof.inlinedCaller"}}, wantSamples: []*profile.Sample{ {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, - {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}}, + {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}}, }, }} diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go index 8a30c7151d..416ace7ab2 100644 --- a/src/runtime/pprof/proto.go +++ b/src/runtime/pprof/proto.go @@ -384,6 +384,10 @@ func (b *profileBuilder) build() { // It may emit to b.pb, so there must be no message encoding in progress. func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) { b.deck.reset() + + // The last frame might be truncated. Recover lost inline frames. 
+ stk = runtime_expandFinalInlineFrame(stk) + for len(stk) > 0 { addr := stk[0] if l, ok := b.locs[addr]; ok { @@ -395,22 +399,12 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo // then, record the cached location. locs = append(locs, l.id) - // The stk may be truncated due to the stack depth limit - // (e.g. See maxStack and maxCPUProfStack in runtime) or - // bugs in runtime. Avoid the crash in either case. - // TODO(hyangah): The correct fix may require using the exact - // pcs as the key for b.locs cache management instead of just - // relying on the very first pc. We are late in the go1.14 dev - // cycle, so this is a workaround with little code change. - if len(l.pcs) > len(stk) { - stk = nil - // TODO(hyangah): would be nice if we can enable - // debug print out on demand and report the problematic - // cached location entry and stack traces. Do we already - // have such facility to utilize (e.g. GODEBUG)? - } else { - stk = stk[len(l.pcs):] // skip the matching pcs. - } + // Skip the matching pcs. + // + // Even if stk was truncated due to the stack depth + // limit, expandFinalInlineFrame above has already + // fixed the truncation, ensuring it is long enough. + stk = stk[len(l.pcs):] continue } @@ -427,9 +421,9 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo stk = stk[1:] continue } - // add failed because this addr is not inlined with - // the existing PCs in the deck. Flush the deck and retry to - // handle this pc. + // add failed because this addr is not inlined with the + // existing PCs in the deck. Flush the deck and retry handling + // this pc. if id := b.emitLocation(); id > 0 { locs = append(locs, id) } @@ -463,8 +457,8 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo // the fake pcs and restore the inlined and entry functions. 
Inlined functions // have the following properties: // Frame's Func is nil (note: also true for non-Go functions), and -// Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions), -// Frame's Name does not match its entry function frame's name. +// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and +// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive). // // As reading and processing the pcs in a stack trace one by one (from leaf to the root), // we use pcDeck to temporarily hold the observed pcs and their expanded frames @@ -486,8 +480,8 @@ func (d *pcDeck) reset() { // to the deck. If it fails the caller needs to flush the deck and retry. func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) { if existing := len(d.pcs); existing > 0 { - // 'frames' are all expanded from one 'pc' and represent all inlined functions - // so we check only the last one. + // 'd.frames' are all expanded from one 'pc' and represent all + // inlined functions so we check only the last one. newFrame := frames[0] last := d.frames[existing-1] if last.Func != nil { // the last frame can't be inlined. Flush. diff --git a/src/runtime/pprof/runtime.go b/src/runtime/pprof/runtime.go index b71bbad9a6..dd2545b339 100644 --- a/src/runtime/pprof/runtime.go +++ b/src/runtime/pprof/runtime.go @@ -9,6 +9,9 @@ import ( "unsafe" ) +// runtime_expandFinalInlineFrame is defined in runtime/symtab.go. +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr + // runtime_setProfLabel is defined in runtime/proflabel.go. 
func runtime_setProfLabel(labels unsafe.Pointer) diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index a6e08d7214..997cfa3f7a 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -148,6 +148,59 @@ func (ci *Frames) Next() (frame Frame, more bool) { return } +// runtime_expandFinalInlineFrame expands the final pc in stk to include all +// "callers" if pc is inline. +// +//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr { + pc := stk[len(stk)-1] + tracepc := pc - 1 + + f := findfunc(tracepc) + if !f.valid() { + // Not a Go function. + return stk + } + + inldata := funcdata(f, _FUNCDATA_InlTree) + if inldata == nil { + // Nothing inline in f. + return stk + } + + // Treat the previous func as normal. We haven't actually checked, but + // since this pc was included in the stack, we know it shouldn't be + // elided. + lastFuncID := funcID_normal + + // Remove pc from stk; we'll re-add it below. + stk = stk[:len(stk)-1] + + // See inline expansion in gentraceback. + var cache pcvalueCache + inltree := (*[1 << 20]inlinedCall)(inldata) + for { + ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache) + if ix < 0 { + break + } + if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) { + // ignore wrappers + } else { + stk = append(stk, pc) + } + lastFuncID = inltree[ix].funcID + // Back up to an instruction in the "caller". + tracepc = f.entry + uintptr(inltree[ix].parentPc) + pc = tracepc + 1 + } + + // N.B. we want to keep the last parentPC which is not inline. + stk = append(stk, pc) + + return stk +} + // expandCgoFrames expands frame information for pc, known to be // a non-Go function, using the cgoSymbolizer hook. expandCgoFrames // returns nil if pc could not be expanded. 
From 2b0f481278cc093e9f61945592257e6d651a169c Mon Sep 17 00:00:00 2001 From: Stefan Baebler Date: Fri, 6 Mar 2020 08:21:26 +0000 Subject: [PATCH 68/69] doc/go1.14: document that unparsable URL in net/url.Error is now quoted Fixes #37614 Updates #36878 Updates #29384 Updates #37630 Change-Id: I63dad8b554353197ae0f29fa2a84f17bffa58557 GitHub-Last-Rev: 5297df32200ea5b52b2e7b52c8ee022d37e44111 GitHub-Pull-Request: golang/go#37661 Reviewed-on: https://go-review.googlesource.com/c/go/+/222037 Reviewed-by: Ian Lance Taylor --- doc/go1.14.html | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/doc/go1.14.html b/doc/go1.14.html index 6e7c5dc9b6..eb35fa8cae 100644 --- a/doc/go1.14.html +++ b/doc/go1.14.html @@ -759,6 +759,19 @@ Do not send CLs removing the interior tags from such phrases.

+
net/url
+
+

+ When parsing of a URL fails + (for example by Parse + or ParseRequestURI), + the resulting Error message + will now quote the unparsable URL. + This provides clearer structure and consistency with other parsing errors. +

+
+
+
os/signal

From 5ea58c63468bbc7e8705ee13d0bddbf3693785fe Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Thu, 5 Mar 2020 11:11:47 -0500 Subject: [PATCH 69/69] cmd/go: make go test -json report failures for panicking/exiting tests 'go test -json' should report that a test failed if the test binary did not exit normally with status 0. This covers panics, non-zero exits, and abnormal terminations. These tests don't print a final result when run with -test.v (which is used by 'go test -json'). The final result should be "PASS" or "FAIL" on a line by itself. 'go test' prints "FAIL" in this case, but includes error information. test2json was changed in CL 192104 to report that a test passed if it does not report a final status. This caused 'go test -json' to report that a test passed after a panic or non-zero exit. With this change, test2json treats "FAIL" with error information the same as "FAIL" on a line by itself. This is intended to be a minimal fix for backporting, but it will likely be replaced by a complete solution for #29062. Fixes #37555 Updates #29062 Updates #31969 Change-Id: Icb67bcd36bed97e6a8d51f4d14bf71f73c83ac3d Reviewed-on: https://go-review.googlesource.com/c/go/+/222243 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. 
Mills --- src/cmd/go/internal/test/test.go | 8 +++ .../testdata/script/test_json_panic_exit.txt | 69 +++++++++++++++++++ src/cmd/internal/test2json/test2json.go | 9 ++- .../internal/test2json/testdata/panic.json | 2 +- 4 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 src/cmd/go/testdata/script/test_json_panic_exit.txt diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 1c6fb0b97f..dbb899219d 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -1239,6 +1239,14 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error { if len(out) == 0 { fmt.Fprintf(cmd.Stdout, "%s\n", err) } + // NOTE(golang.org/issue/37555): test2json reports that a test passes + // unless "FAIL" is printed at the beginning of a line. The test may not + // actually print that if it panics, exits, or terminates abnormally, + // so we print it here. We can't always check whether it was printed + // because some tests need stdout to be a terminal (golang.org/issue/34791), + // not a pipe. + // TODO(golang.org/issue/29062): tests that exit with status 0 without + // printing a final result should fail. fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t) } diff --git a/src/cmd/go/testdata/script/test_json_panic_exit.txt b/src/cmd/go/testdata/script/test_json_panic_exit.txt new file mode 100644 index 0000000000..d0a7991fe5 --- /dev/null +++ b/src/cmd/go/testdata/script/test_json_panic_exit.txt @@ -0,0 +1,69 @@ +# Verifies golang.org/issue/37555. + +[short] skip + +# 'go test -json' should say a test passes if it says it passes. +go test -json ./pass +stdout '"Action":"pass".*\n\z' +! stdout '"Test":.*\n\z' + +# 'go test -json' should say a test passes if it exits 0 and prints nothing. +# TODO(golang.org/issue/29062): this should fail in the future. +go test -json ./exit0main +stdout '"Action":"pass".*\n\z' +! 
stdout '"Test":.*\n\z' + +# 'go test -json' should say a test fails if it exits 1 and prints nothing. +! go test -json ./exit1main +stdout '"Action":"fail".*\n\z' +! stdout '"Test":.*\n\z' + +# 'go test -json' should say a test fails if it panics. +! go test -json ./panic +stdout '"Action":"fail".*\n\z' +! stdout '"Test":.*\n\z' + +-- go.mod -- +module example.com/test + +go 1.14 + +-- pass/pass_test.go -- +package pass_test + +import "testing" + +func TestPass(t *testing.T) {} + +-- exit0main/exit0main_test.go -- +package exit0_test + +import ( + "os" + "testing" +) + +func TestMain(m *testing.M) { + os.Exit(0) +} + +-- exit1main/exit1main_test.go -- +package exit1_test + +import ( + "os" + "testing" +) + +func TestMain(m *testing.M) { + os.Exit(1) +} + +-- panic/panic_test.go -- +package panic_test + +import "testing" + +func TestPanic(t *testing.T) { + panic("oh no") +} diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go index aa63c8b9a6..098128ef3a 100644 --- a/src/cmd/internal/test2json/test2json.go +++ b/src/cmd/internal/test2json/test2json.go @@ -128,9 +128,16 @@ func (c *converter) Write(b []byte) (int, error) { } var ( + // printed by test on successful run. bigPass = []byte("PASS\n") + + // printed by test after a normal test failure. bigFail = []byte("FAIL\n") + // printed by 'go test' along with an error if the test binary terminates + // with an error. + bigFailErrorPrefix = []byte("FAIL\t") + updates = [][]byte{ []byte("=== RUN "), []byte("=== PAUSE "), @@ -155,7 +162,7 @@ var ( // before or after emitting other events. func (c *converter) handleInputLine(line []byte) { // Final PASS or FAIL. 
- if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) { + if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) || bytes.HasPrefix(line, bigFailErrorPrefix) { c.flushReport(0) c.output.write(line) if bytes.Equal(line, bigPass) { diff --git a/src/cmd/internal/test2json/testdata/panic.json b/src/cmd/internal/test2json/testdata/panic.json index f99679c2e2..f7738142e6 100644 --- a/src/cmd/internal/test2json/testdata/panic.json +++ b/src/cmd/internal/test2json/testdata/panic.json @@ -13,7 +13,7 @@ {"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:909 +0xc9\n"} {"Action":"output","Test":"TestPanic","Output":"created by testing.(*T).Run\n"} {"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:960 +0x350\n"} -{"Action":"output","Test":"TestPanic","Output":"FAIL\tcommand-line-arguments\t0.042s\n"} {"Action":"fail","Test":"TestPanic"} +{"Action":"output","Output":"FAIL\tcommand-line-arguments\t0.042s\n"} {"Action":"output","Output":"FAIL\n"} {"Action":"fail"}